From 9d3e997acda8e3e7e577adb4536cdbf8e8389dc6 Mon Sep 17 00:00:00 2001 From: Dusan Morhac <55763425+dudo50@users.noreply.github.com> Date: Sun, 5 Jan 2025 00:39:22 +0100 Subject: [PATCH] Update SDK --- .cargo/config.toml | 4 - .config/lychee.toml | 4 +- .config/nextest.toml | 8 +- .config/taplo.toml | 7 - .github/actions/workflow-stopper/action.yml | 28 - .github/env | 2 +- .../check-missing-readme-generation.sh | 36 - .github/scripts/cmd/cmd.py | 195 +- .github/scripts/cmd/test_cmd.py | 38 +- .github/scripts/common/lib.sh | 71 +- .github/scripts/generate-prdoc.py | 18 +- .../scripts/release/build-linux-release.sh | 4 +- .../scripts/release/build-macos-release.sh | 37 - .github/scripts/release/distributions | 39 - .github/scripts/release/release_lib.sh | 76 +- .github/workflows/benchmarks-networking.yml | 109 - .github/workflows/build-misc.yml | 23 +- .../workflows/check-frame-omni-bencher.yml | 15 - .github/workflows/check-links.yml | 2 +- .github/workflows/check-runtime-migration.yml | 13 +- .github/workflows/check-semver.yml | 19 +- .github/workflows/checks-quick.yml | 36 +- .github/workflows/checks.yml | 23 - .github/workflows/cmd.yml | 70 +- .github/workflows/command-backport.yml | 4 +- .github/workflows/command-prdoc.yml | 2 +- .github/workflows/docs.yml | 14 - .github/workflows/publish-check-compile.yml | 48 - .github/workflows/publish-check-crates.yml | 2 +- .github/workflows/publish-claim-crates.yml | 2 +- ...ation.yml => release-10_rc-automation.yml} | 0 .github/workflows/release-20_build-rc.yml | 263 - .../release-30_publish_release_draft.yml | 208 +- .../release-31_promote-rc-to-final.yml | 125 - .../release-40_publish-deb-package.yml | 152 - .../workflows/release-50_publish-docker.yml | 95 +- ...table.yml => release-branchoff-stable.yml} | 0 .github/workflows/release-build-rc.yml | 82 + .../release-reusable-promote-to-final.yml | 83 - .../workflows/release-reusable-rc-buid.yml | 273 +- .../workflows/release-reusable-s3-upload.yml | 15 +- 
.github/workflows/release-srtool.yml | 18 +- .github/workflows/runtimes-matrix.json | 33 +- ...subsystem.yml => subsystem-benchmarks.yml} | 0 .../workflows/tests-linux-stable-coverage.yml | 2 +- .github/workflows/tests-linux-stable.yml | 28 - .github/workflows/tests-misc.yml | 8 - .gitlab-ci.yml | 2 +- .../pipeline/zombienet/parachain-template.yml | 2 +- .gitlab/pipeline/zombienet/polkadot.yml | 34 +- Cargo.lock | 13845 ++++++---------- Cargo.toml | 38 +- README.md | 4 +- bridges/bin/runtime-common/Cargo.toml | 1 - bridges/bin/runtime-common/src/extensions.rs | 23 +- bridges/bin/runtime-common/src/integrity.rs | 95 +- bridges/bin/runtime-common/src/mock.rs | 1 - .../chains/chain-asset-hub-rococo/Cargo.toml | 6 - .../chains/chain-asset-hub-rococo/src/lib.rs | 25 - .../chains/chain-asset-hub-westend/Cargo.toml | 6 - .../chains/chain-asset-hub-westend/src/lib.rs | 25 - .../chain-bridge-hub-cumulus/Cargo.toml | 4 +- .../chains/chain-bridge-hub-kusama/Cargo.toml | 2 +- .../chain-bridge-hub-polkadot/Cargo.toml | 2 +- .../chains/chain-bridge-hub-rococo/Cargo.toml | 2 +- .../chain-bridge-hub-westend/Cargo.toml | 2 +- .../chains/chain-polkadot-bulletin/src/lib.rs | 2 +- bridges/modules/beefy/Cargo.toml | 4 +- bridges/modules/grandpa/Cargo.toml | 2 +- bridges/modules/messages/Cargo.toml | 2 +- bridges/modules/relayers/Cargo.toml | 8 +- bridges/modules/relayers/src/extension/mod.rs | 33 +- bridges/modules/relayers/src/lib.rs | 5 +- bridges/modules/relayers/src/mock.rs | 15 +- .../modules/relayers/src/payment_adapter.rs | 24 +- .../modules/xcm-bridge-hub-router/Cargo.toml | 1 - .../xcm-bridge-hub-router/src/benchmarking.rs | 27 +- .../modules/xcm-bridge-hub-router/src/lib.rs | 230 +- .../modules/xcm-bridge-hub-router/src/mock.rs | 1 - .../xcm-bridge-hub-router/src/weights.rs | 27 - bridges/modules/xcm-bridge-hub/Cargo.toml | 9 +- .../modules/xcm-bridge-hub/src/exporter.rs | 115 +- bridges/modules/xcm-bridge-hub/src/lib.rs | 4 +- bridges/modules/xcm-bridge-hub/src/mock.rs | 150 
+- bridges/primitives/beefy/Cargo.toml | 2 +- bridges/primitives/header-chain/Cargo.toml | 2 +- bridges/primitives/messages/Cargo.toml | 6 +- bridges/primitives/relayers/Cargo.toml | 2 +- .../xcm-bridge-hub-router/Cargo.toml | 2 +- bridges/primitives/xcm-bridge-hub/Cargo.toml | 6 +- bridges/primitives/xcm-bridge-hub/src/lib.rs | 5 - bridges/relays/client-substrate/Cargo.toml | 6 +- bridges/relays/lib-substrate-relay/Cargo.toml | 10 +- bridges/relays/utils/Cargo.toml | 8 +- bridges/relays/utils/src/initialize.rs | 7 +- .../pallets/ethereum-client/Cargo.toml | 22 +- .../ethereum-client/fixtures/Cargo.toml | 4 +- .../pallets/inbound-queue/Cargo.toml | 21 +- .../pallets/inbound-queue/fixtures/Cargo.toml | 4 +- .../pallets/inbound-queue/src/envelope.rs | 7 +- .../pallets/inbound-queue/src/mock.rs | 14 + .../pallets/inbound-queue/src/test.rs | 25 +- .../pallets/outbound-queue/Cargo.toml | 10 +- .../outbound-queue/merkle-tree/Cargo.toml | 4 +- .../outbound-queue/runtime-api/Cargo.toml | 6 +- bridges/snowbridge/pallets/system/Cargo.toml | 9 +- .../pallets/system/runtime-api/Cargo.toml | 4 +- .../snowbridge/primitives/beacon/Cargo.toml | 14 +- bridges/snowbridge/primitives/core/Cargo.toml | 11 +- .../snowbridge/primitives/ethereum/Cargo.toml | 10 +- .../snowbridge/primitives/router/Cargo.toml | 3 +- .../primitives/router/src/inbound/mod.rs | 5 +- .../runtime/runtime-common/Cargo.toml | 5 +- .../snowbridge/runtime/test-common/Cargo.toml | 5 +- .../snowbridge/runtime/test-common/src/lib.rs | 6 +- .../rococo-westend/bridges_rococo_westend.sh | 121 +- .../js-helpers/wrapped-assets-balance.js | 14 +- bridges/testing/framework/utils/bridges.sh | 4 +- .../roc-reaches-westend.zndsl | 2 +- .../wnd-reaches-rococo.zndsl | 2 +- cumulus/README.md | 6 +- cumulus/bin/pov-validator/Cargo.toml | 14 +- cumulus/client/cli/Cargo.toml | 6 +- cumulus/client/collator/Cargo.toml | 8 +- cumulus/client/consensus/aura/Cargo.toml | 17 +- .../slot_based/block_builder_task.rs | 144 +- 
.../src/collators/slot_based/block_import.rs | 144 - .../collators/slot_based/collation_task.rs | 41 +- .../aura/src/collators/slot_based/mod.rs | 120 +- .../slot_based/relay_chain_data_cache.rs | 127 - cumulus/client/consensus/common/Cargo.toml | 6 +- cumulus/client/consensus/proposer/Cargo.toml | 2 - .../client/consensus/relay-chain/Cargo.toml | 4 +- cumulus/client/network/Cargo.toml | 8 +- cumulus/client/parachain-inherent/Cargo.toml | 2 - cumulus/client/parachain-inherent/src/mock.rs | 13 +- cumulus/client/pov-recovery/Cargo.toml | 16 +- .../Cargo.toml | 6 +- .../client/relay-chain-interface/Cargo.toml | 10 +- .../relay-chain-minimal-node/Cargo.toml | 20 +- .../relay-chain-minimal-node/src/lib.rs | 4 +- .../relay-chain-minimal-node/src/network.rs | 26 +- .../relay-chain-rpc-interface/Cargo.toml | 36 +- cumulus/client/service/Cargo.toml | 17 +- cumulus/client/service/src/lib.rs | 3 +- cumulus/docs/release.md | 135 + cumulus/pallets/aura-ext/Cargo.toml | 2 - cumulus/pallets/collator-selection/Cargo.toml | 14 +- cumulus/pallets/dmp-queue/Cargo.toml | 3 +- cumulus/pallets/parachain-system/Cargo.toml | 17 +- .../parachain-system/proc-macro/Cargo.toml | 6 +- cumulus/pallets/parachain-system/src/lib.rs | 2 +- .../src/validate_block/trie_cache.rs | 5 +- .../src/validate_block/trie_recorder.rs | 5 +- .../pallets/session-benchmarking/Cargo.toml | 4 +- cumulus/pallets/solo-to-para/Cargo.toml | 2 - cumulus/pallets/xcm/Cargo.toml | 6 +- cumulus/pallets/xcmp-queue/Cargo.toml | 15 +- .../chain-specs/asset-hub-kusama.json | 3 +- .../chain-specs/asset-hub-polkadot.json | 3 +- .../chain-specs/asset-hub-westend.json | 3 +- .../chain-specs/bridge-hub-kusama.json | 3 +- .../chain-specs/bridge-hub-polkadot.json | 3 +- .../chain-specs/bridge-hub-westend.json | 3 +- .../chain-specs/collectives-polkadot.json | 3 +- .../chain-specs/collectives-westend.json | 3 +- .../chain-specs/coretime-kusama.json | 3 +- .../chain-specs/coretime-polkadot.json | 3 +- 
.../chain-specs/coretime-westend.json | 3 +- .../parachains/chain-specs/people-kusama.json | 3 +- .../chain-specs/people-polkadot.json | 3 +- .../chain-specs/people-westend.json | 3 +- cumulus/parachains/common/Cargo.toml | 5 +- .../assets/asset-hub-rococo/Cargo.toml | 6 +- .../assets/asset-hub-westend/Cargo.toml | 8 +- .../bridges/bridge-hub-rococo/Cargo.toml | 8 +- .../bridges/bridge-hub-westend/Cargo.toml | 8 +- .../collectives-westend/Cargo.toml | 6 +- .../coretime/coretime-rococo/Cargo.toml | 6 +- .../coretime/coretime-westend/Cargo.toml | 6 +- .../people/people-rococo/Cargo.toml | 4 +- .../people/people-westend/Cargo.toml | 4 +- .../parachains/testing/penpal/Cargo.toml | 4 +- .../emulated/chains/relays/rococo/Cargo.toml | 10 +- .../emulated/chains/relays/westend/Cargo.toml | 12 +- .../emulated/common/Cargo.toml | 30 +- .../emulated/common/src/impls.rs | 2 - .../emulated/common/src/macros.rs | 3 - .../emulated/common/src/xcm_helpers.rs | 4 +- .../networks/rococo-system/Cargo.toml | 8 +- .../networks/rococo-westend-system/Cargo.toml | 6 +- .../networks/westend-system/Cargo.toml | 6 +- .../tests/assets/asset-hub-rococo/Cargo.toml | 18 +- .../src/tests/hybrid_transfers.rs | 4 - .../src/tests/reserve_transfer.rs | 19 +- .../asset-hub-rococo/src/tests/treasury.rs | 4 - .../tests/assets/asset-hub-westend/Cargo.toml | 21 +- .../tests/assets/asset-hub-westend/src/lib.rs | 1 - .../src/tests/claim_assets.rs | 82 - .../src/tests/hybrid_transfers.rs | 145 +- .../src/tests/reserve_transfer.rs | 19 +- .../src/tests/set_asset_claimer.rs | 4 +- .../asset-hub-westend/src/tests/transact.rs | 4 +- .../asset-hub-westend/src/tests/treasury.rs | 3 - .../bridges/bridge-hub-rococo/Cargo.toml | 14 +- .../src/tests/asset_transfers.rs | 2 +- .../src/tests/register_bridged_assets.rs | 2 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 6 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 18 +- .../bridges/bridge-hub-westend/Cargo.toml | 16 +- .../src/tests/asset_transfers.rs | 4 +- 
.../src/tests/register_bridged_assets.rs | 2 +- .../bridge-hub-westend/src/tests/send_xcm.rs | 6 +- .../bridge-hub-westend/src/tests/transact.rs | 4 +- .../collectives-westend/Cargo.toml | 18 +- .../src/tests/fellowship.rs | 1 - .../src/tests/fellowship_treasury.rs | 3 - .../tests/coretime/coretime-rococo/Cargo.toml | 2 +- .../src/tests/coretime_interface.rs | 5 - .../coretime/coretime-westend/Cargo.toml | 2 +- .../src/tests/coretime_interface.rs | 5 - .../tests/people/people-rococo/Cargo.toml | 2 +- .../tests/people/people-westend/Cargo.toml | 3 +- .../people-westend/src/tests/governance.rs | 550 - .../people/people-westend/src/tests/mod.rs | 1 - .../pallets/collective-content/Cargo.toml | 2 - .../pallets/parachain-info/Cargo.toml | 2 - cumulus/parachains/pallets/ping/Cargo.toml | 6 +- cumulus/parachains/pallets/ping/src/lib.rs | 2 - .../assets/asset-hub-rococo/Cargo.toml | 20 +- .../assets/asset-hub-rococo/src/lib.rs | 56 +- .../src/weights/pallet_xcm.rs | 187 +- .../weights/pallet_xcm_bridge_hub_router.rs | 30 +- .../asset-hub-rococo/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- .../assets/asset-hub-rococo/src/xcm_config.rs | 2 - .../assets/asset-hub-rococo/tests/tests.rs | 77 +- .../assets/asset-hub-westend/Cargo.toml | 18 +- .../src/genesis_config_presets.rs | 17 +- .../assets/asset-hub-westend/src/lib.rs | 100 +- .../src/weights/pallet_xcm.rs | 187 +- .../weights/pallet_xcm_bridge_hub_router.rs | 32 +- .../asset-hub-westend/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- .../asset-hub-westend/src/xcm_config.rs | 2 - .../assets/asset-hub-westend/tests/tests.rs | 70 +- .../runtimes/assets/common/Cargo.toml | 13 +- .../runtimes/assets/common/src/lib.rs | 79 +- .../runtimes/assets/test-utils/Cargo.toml | 14 +- .../assets/test-utils/src/test_cases.rs | 124 +- .../test-utils/src/test_cases_over_bridge.rs | 5 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 22 +- .../src/bridge_to_bulletin_config.rs | 
42 +- .../src/bridge_to_westend_config.rs | 46 +- .../src/genesis_config_presets.rs | 38 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 21 +- .../src/weights/pallet_xcm.rs | 169 +- .../bridge-hub-rococo/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- .../bridge-hub-rococo/src/xcm_config.rs | 2 - .../bridge-hub-rococo/tests/snowbridge.rs | 6 +- .../bridge-hub-rococo/tests/tests.rs | 58 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 26 +- .../src/bridge_to_rococo_config.rs | 46 +- .../src/genesis_config_presets.rs | 38 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 20 +- .../src/weights/pallet_xcm.rs | 169 +- .../bridge-hub-westend/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- .../bridge-hub-westend/src/xcm_config.rs | 2 - .../bridge-hub-westend/tests/snowbridge.rs | 6 +- .../bridge-hub-westend/tests/tests.rs | 37 +- .../runtimes/bridge-hubs/common/Cargo.toml | 11 +- .../bridge-hubs/test-utils/Cargo.toml | 14 +- .../src/test_cases/from_grandpa_chain.rs | 8 +- .../src/test_cases/from_parachain.rs | 8 +- .../test-utils/src/test_cases/helpers.rs | 120 +- .../test-utils/src/test_cases/mod.rs | 28 +- .../collectives-westend/Cargo.toml | 22 +- .../src/genesis_config_presets.rs | 17 +- .../collectives-westend/src/lib.rs | 21 +- .../src/weights/pallet_xcm.rs | 179 +- .../collectives-westend/src/xcm_config.rs | 9 +- .../collectives-westend/tests/tests.rs | 14 +- .../parachains/runtimes/constants/Cargo.toml | 2 - .../contracts/contracts-rococo/Cargo.toml | 39 +- .../contracts/contracts-rococo/src/lib.rs | 17 +- .../contracts-rococo/src/xcm_config.rs | 2 - .../coretime/coretime-rococo/Cargo.toml | 13 +- .../coretime/coretime-rococo/src/coretime.rs | 4 - .../coretime/coretime-rococo/src/lib.rs | 43 +- .../src/weights/pallet_broker.rs | 264 +- .../coretime-rococo/src/weights/pallet_xcm.rs | 173 +- .../coretime-rococo/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- 
.../coretime-rococo/src/xcm_config.rs | 2 - .../coretime/coretime-rococo/tests/tests.rs | 14 +- .../coretime/coretime-westend/Cargo.toml | 12 +- .../coretime/coretime-westend/src/coretime.rs | 16 - .../coretime/coretime-westend/src/lib.rs | 47 +- .../src/weights/pallet_broker.rs | 268 +- .../src/weights/pallet_xcm.rs | 173 +- .../coretime-westend/src/weights/xcm/mod.rs | 25 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 162 +- .../coretime-westend/src/xcm_config.rs | 12 +- .../coretime/coretime-westend/tests/tests.rs | 14 +- .../glutton/glutton-westend/Cargo.toml | 7 +- .../glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-rococo/Cargo.toml | 8 +- .../runtimes/people/people-rococo/src/lib.rs | 21 +- .../people-rococo/src/weights/pallet_xcm.rs | 177 +- .../people-rococo/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 9 +- .../people/people-rococo/tests/tests.rs | 14 +- .../runtimes/people/people-westend/Cargo.toml | 8 +- .../runtimes/people/people-westend/src/lib.rs | 27 +- .../people-westend/src/weights/pallet_xcm.rs | 177 +- .../people-westend/src/weights/xcm/mod.rs | 25 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 178 +- .../people/people-westend/src/xcm_config.rs | 10 +- .../people/people-westend/tests/tests.rs | 14 +- .../parachains/runtimes/test-utils/Cargo.toml | 16 +- .../parachains/runtimes/test-utils/src/lib.rs | 28 +- .../runtimes/test-utils/src/test_cases.rs | 67 +- .../runtimes/testing/penpal/Cargo.toml | 13 +- .../runtimes/testing/penpal/src/lib.rs | 14 +- .../runtimes/testing/penpal/src/xcm_config.rs | 6 +- .../testing/rococo-parachain/Cargo.toml | 11 +- cumulus/polkadot-omni-node/Cargo.toml | 2 - cumulus/polkadot-omni-node/README.md | 4 +- cumulus/polkadot-omni-node/lib/Cargo.toml | 67 +- cumulus/polkadot-omni-node/lib/src/cli.rs | 11 +- cumulus/polkadot-omni-node/lib/src/command.rs | 15 +- .../lib/src/common/runtime.rs | 151 +- .../polkadot-omni-node/lib/src/common/spec.rs | 344 +- 
.../lib/src/common/types.rs | 14 +- .../polkadot-omni-node/lib/src/nodes/aura.rs | 142 +- .../lib/src/nodes/manual_seal.rs | 43 +- cumulus/polkadot-parachain/Cargo.toml | 21 +- cumulus/primitives/aura/Cargo.toml | 2 - cumulus/primitives/core/Cargo.toml | 3 - .../primitives/parachain-inherent/Cargo.toml | 2 - .../proof-size-hostfunction/Cargo.toml | 6 +- .../storage-weight-reclaim/Cargo.toml | 4 +- .../storage-weight-reclaim/src/tests.rs | 26 +- cumulus/primitives/timestamp/Cargo.toml | 2 - cumulus/primitives/utility/Cargo.toml | 7 +- cumulus/test/client/Cargo.toml | 34 +- cumulus/test/client/src/lib.rs | 6 +- cumulus/test/relay-sproof-builder/Cargo.toml | 2 - cumulus/test/runtime/Cargo.toml | 19 +- cumulus/test/runtime/build.rs | 8 - cumulus/test/runtime/src/lib.rs | 5 - cumulus/test/service/Cargo.toml | 44 +- cumulus/test/service/src/chain_spec.rs | 10 - cumulus/test/service/src/cli.rs | 8 +- cumulus/test/service/src/lib.rs | 38 +- cumulus/xcm/xcm-emulator/Cargo.toml | 28 +- docs/RELEASE.md | 6 +- docs/contributor/container.md | 2 +- docs/contributor/prdoc.md | 124 +- docs/sdk/Cargo.toml | 93 +- .../packages/guides/first-pallet/Cargo.toml | 4 +- .../packages/guides/first-runtime/src/lib.rs | 12 +- docs/sdk/src/guides/your_first_node.rs | 24 +- docs/sdk/src/polkadot_sdk/frame_runtime.rs | 1 - .../src/reference_docs/chain_spec_genesis.rs | 8 +- .../chain_spec_runtime/Cargo.toml | 9 +- .../chain_spec_runtime/src/presets.rs | 6 +- .../tests/chain_spec_builder_tests.rs | 296 +- docs/sdk/src/reference_docs/omni_node.rs | 16 - polkadot/Cargo.toml | 6 +- polkadot/cli/Cargo.toml | 22 +- polkadot/core-primitives/Cargo.toml | 6 +- polkadot/erasure-coding/Cargo.toml | 10 +- polkadot/erasure-coding/fuzzer/Cargo.toml | 4 +- polkadot/grafana/README.md | 2 +- polkadot/grafana/parachains/status.json | 2 +- polkadot/node/collation-generation/Cargo.toml | 10 +- .../core/approval-voting-parallel/Cargo.toml | 30 +- polkadot/node/core/approval-voting/Cargo.toml | 40 +- 
.../src/approval_db/v3/tests.rs | 52 +- polkadot/node/core/approval-voting/src/lib.rs | 12 +- polkadot/node/core/approval-voting/src/ops.rs | 2 +- .../node/core/approval-voting/src/tests.rs | 314 - polkadot/node/core/av-store/Cargo.toml | 18 +- polkadot/node/core/backing/Cargo.toml | 34 +- polkadot/node/core/backing/src/error.rs | 3 - polkadot/node/core/backing/src/lib.rs | 638 +- polkadot/node/core/backing/src/tests/mod.rs | 2527 +-- .../src/tests/prospective_parachains.rs | 1742 ++ .../node/core/bitfield-signing/Cargo.toml | 6 +- .../node/core/candidate-validation/Cargo.toml | 22 +- polkadot/node/core/chain-api/Cargo.toml | 8 +- polkadot/node/core/chain-selection/Cargo.toml | 14 +- .../node/core/dispute-coordinator/Cargo.toml | 22 +- .../node/core/parachains-inherent/Cargo.toml | 6 +- .../core/prospective-parachains/Cargo.toml | 12 +- .../src/fragment_chain/mod.rs | 2 +- .../src/fragment_chain/tests.rs | 63 +- polkadot/node/core/provisioner/Cargo.toml | 16 +- polkadot/node/core/pvf-checker/Cargo.toml | 16 +- polkadot/node/core/pvf/Cargo.toml | 11 +- polkadot/node/core/pvf/common/Cargo.toml | 2 - .../node/core/pvf/execute-worker/Cargo.toml | 6 +- .../node/core/pvf/prepare-worker/Cargo.toml | 6 +- polkadot/node/core/pvf/src/execute/queue.rs | 2 + .../node/core/pvf/src/worker_interface.rs | 6 +- polkadot/node/core/runtime-api/Cargo.toml | 12 +- polkadot/node/gum/Cargo.toml | 4 +- polkadot/node/gum/proc-macro/Cargo.toml | 10 +- polkadot/node/malus/Cargo.toml | 28 +- polkadot/node/metrics/Cargo.toml | 24 +- polkadot/node/metrics/src/tests.rs | 2 +- .../network/approval-distribution/Cargo.toml | 10 +- .../network/approval-distribution/src/lib.rs | 162 +- .../approval-distribution/src/tests.rs | 137 +- .../availability-distribution/Cargo.toml | 28 +- .../network/availability-recovery/Cargo.toml | 26 +- .../network/bitfield-distribution/Cargo.toml | 18 +- polkadot/node/network/bridge/Cargo.toml | 20 +- .../node/network/collator-protocol/Cargo.toml | 18 +- 
.../src/collator_side/mod.rs | 2 +- .../network/collator-protocol/src/error.rs | 5 +- .../src/validator_side/claim_queue_state.rs | 1055 -- .../src/validator_side/collation.rs | 184 +- .../src/validator_side/mod.rs | 602 +- .../src/validator_side/tests/mod.rs | 630 +- .../tests/prospective_parachains.rs | 1192 +- .../network/dispute-distribution/Cargo.toml | 26 +- .../node/network/gossip-support/Cargo.toml | 14 +- polkadot/node/network/protocol/Cargo.toml | 20 +- .../network/protocol/src/grid_topology.rs | 60 - .../network/statement-distribution/Cargo.toml | 44 +- polkadot/node/overseer/Cargo.toml | 24 +- polkadot/node/primitives/Cargo.toml | 16 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/node/service/Cargo.toml | 79 +- polkadot/node/service/src/lib.rs | 4 +- polkadot/node/service/src/overseer.rs | 9 +- polkadot/node/subsystem-bench/Cargo.toml | 84 +- .../node/subsystem-test-helpers/Cargo.toml | 10 +- polkadot/node/subsystem-types/Cargo.toml | 22 +- polkadot/node/subsystem-types/src/messages.rs | 15 +- polkadot/node/subsystem-util/Cargo.toml | 28 +- .../src/backing_implicit_view.rs | 452 +- polkadot/node/subsystem/Cargo.toml | 4 +- polkadot/node/test/client/Cargo.toml | 24 +- polkadot/node/test/service/Cargo.toml | 30 +- polkadot/node/test/service/src/lib.rs | 4 +- polkadot/node/tracking-allocator/Cargo.toml | 2 - .../node/zombienet-backchannel/Cargo.toml | 12 +- polkadot/parachain/Cargo.toml | 10 +- polkadot/parachain/src/primitives.rs | 5 - polkadot/parachain/test-parachains/Cargo.toml | 2 +- .../test-parachains/adder/Cargo.toml | 4 +- .../test-parachains/adder/collator/Cargo.toml | 14 +- .../adder/collator/tests/integration.rs | 2 +- .../parachain/test-parachains/halt/Cargo.toml | 2 +- .../test-parachains/undying/Cargo.toml | 4 +- .../undying/collator/Cargo.toml | 14 +- .../undying/collator/tests/integration.rs | 2 +- polkadot/primitives/Cargo.toml | 14 +- polkadot/primitives/test-helpers/Cargo.toml | 8 +- .../src/node/collators/collator-protocol.md | 
6 + .../src/node/subsystems-and-jobs.md | 7 +- .../src/node/utility/provisioner.md | 5 +- .../src/types/overseer-protocol.md | 13 + polkadot/rpc/Cargo.toml | 30 +- polkadot/runtime/common/Cargo.toml | 37 +- .../common/slot_range_helper/Cargo.toml | 6 +- polkadot/runtime/common/src/auctions.rs | 1934 +++ .../common/src/auctions/benchmarking.rs | 282 - polkadot/runtime/common/src/auctions/mock.rs | 258 - polkadot/runtime/common/src/auctions/mod.rs | 677 - polkadot/runtime/common/src/auctions/tests.rs | 821 - polkadot/runtime/common/src/claims.rs | 1755 ++ .../runtime/common/src/claims/benchmarking.rs | 318 - polkadot/runtime/common/src/claims/mock.rs | 129 - polkadot/runtime/common/src/claims/mod.rs | 723 - polkadot/runtime/common/src/claims/tests.rs | 666 - .../runtime/common/src/identity_migrator.rs | 16 - .../src/paras_registrar/benchmarking.rs | 171 - .../common/src/paras_registrar/mock.rs | 254 - .../runtime/common/src/paras_registrar/mod.rs | 996 +- .../common/src/paras_registrar/tests.rs | 588 - .../runtime/common/src/paras_sudo_wrapper.rs | 12 +- polkadot/runtime/common/src/purchase.rs | 1178 ++ polkadot/runtime/common/src/purchase/mock.rs | 181 - polkadot/runtime/common/src/purchase/mod.rs | 482 - polkadot/runtime/common/src/purchase/tests.rs | 547 - polkadot/runtime/common/src/xcm_sender.rs | 29 +- polkadot/runtime/metrics/Cargo.toml | 6 +- polkadot/runtime/parachains/Cargo.toml | 49 +- .../parachains/src/coretime/benchmarking.rs | 2 - .../runtime/parachains/src/coretime/mod.rs | 6 - .../parachains/src/disputes/benchmarking.rs | 16 +- .../parachains/src/disputes/slashing.rs | 6 +- .../src/disputes/slashing/benchmarking.rs | 38 +- polkadot/runtime/parachains/src/dmp.rs | 37 +- polkadot/runtime/parachains/src/dmp/tests.rs | 44 - .../parachains/src/inclusion/benchmarking.rs | 63 +- polkadot/runtime/parachains/src/lib.rs | 16 - polkadot/runtime/parachains/src/mock.rs | 2 - .../parachains/src/paras/benchmarking.rs | 168 +- polkadot/runtime/rococo/Cargo.toml | 63 
+- polkadot/runtime/rococo/constants/Cargo.toml | 4 +- .../rococo/src/genesis_config_presets.rs | 35 +- polkadot/runtime/rococo/src/impls.rs | 19 +- polkadot/runtime/rococo/src/lib.rs | 24 +- polkadot/runtime/rococo/src/tests.rs | 2 +- .../runtime/rococo/src/weights/pallet_xcm.rs | 251 +- .../weights/pallet_xcm_benchmarks_generic.rs | 7 - .../runtime/rococo/src/weights/xcm/mod.rs | 22 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 137 +- polkadot/runtime/test-runtime/Cargo.toml | 45 +- polkadot/runtime/test-runtime/src/lib.rs | 4 +- polkadot/runtime/westend/Cargo.toml | 59 +- polkadot/runtime/westend/constants/Cargo.toml | 4 +- .../westend/src/genesis_config_presets.rs | 21 +- polkadot/runtime/westend/src/impls.rs | 19 +- polkadot/runtime/westend/src/lib.rs | 25 +- polkadot/runtime/westend/src/tests.rs | 2 +- .../westend/src/weights/pallet_balances.rs | 68 +- .../runtime/westend/src/weights/pallet_xcm.rs | 241 +- ...ot_runtime_parachains_disputes_slashing.rs | 2 +- .../runtime/westend/src/weights/xcm/mod.rs | 27 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 158 +- polkadot/runtime/westend/src/xcm_config.rs | 22 +- polkadot/statement-table/Cargo.toml | 6 +- polkadot/statement-table/src/generic.rs | 112 +- polkadot/statement-table/src/lib.rs | 2 +- polkadot/utils/generate-bags/Cargo.toml | 2 - .../remote-ext-tests/bags-list/Cargo.toml | 4 +- polkadot/xcm/Cargo.toml | 20 +- polkadot/xcm/docs/Cargo.toml | 16 +- polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 15 +- .../src/fungible/benchmarking.rs | 35 +- .../src/generic/benchmarking.rs | 658 +- polkadot/xcm/pallet-xcm/Cargo.toml | 7 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 466 +- polkadot/xcm/pallet-xcm/src/lib.rs | 33 +- polkadot/xcm/procedural/Cargo.toml | 6 +- .../xcm/procedural/src/builder_pattern.rs | 394 +- polkadot/xcm/procedural/src/lib.rs | 9 - .../xcm/procedural/tests/builder_pattern.rs | 59 - .../loads_holding_no_operands.rs} | 36 +- .../loads_holding_no_operands.stderr | 6 + 
.../unexpected_attribute.stderr | 5 - .../unpaid_execution_named_fields.rs} | 23 +- .../unpaid_execution_named_fields.stderr | 5 + polkadot/xcm/src/v3/traits.rs | 4 +- polkadot/xcm/src/v4/mod.rs | 25 +- polkadot/xcm/src/v4/traits.rs | 4 +- polkadot/xcm/src/v5/junction.rs | 4 - polkadot/xcm/src/v5/mod.rs | 133 +- polkadot/xcm/src/v5/traits.rs | 16 +- polkadot/xcm/xcm-builder/Cargo.toml | 29 +- polkadot/xcm/xcm-builder/src/barriers.rs | 3 +- polkadot/xcm/xcm-builder/src/pay.rs | 11 +- .../xcm-builder/src/process_xcm_message.rs | 2 +- polkadot/xcm/xcm-builder/src/routing.rs | 15 - .../xcm/xcm-builder/src/tests/barriers.rs | 20 - polkadot/xcm/xcm-builder/src/tests/pay/pay.rs | 2 +- .../xcm/xcm-builder/src/tests/transacting.rs | 11 - .../xcm/xcm-builder/src/universal_exports.rs | 281 +- polkadot/xcm/xcm-builder/src/weight.rs | 3 +- polkadot/xcm/xcm-executor/Cargo.toml | 15 +- .../xcm-executor/integration-tests/Cargo.toml | 9 +- .../xcm-executor/integration-tests/src/lib.rs | 31 +- polkadot/xcm/xcm-executor/src/lib.rs | 157 +- .../src/tests/execute_with_origin.rs | 177 - polkadot/xcm/xcm-executor/src/tests/mod.rs | 1 - .../src/tests/set_asset_claimer.rs | 6 +- .../xcm/xcm-executor/src/traits/export.rs | 4 +- polkadot/xcm/xcm-runtime-apis/Cargo.toml | 15 +- .../xcm-runtime-apis/tests/fee_estimation.rs | 23 - polkadot/xcm/xcm-runtime-apis/tests/mock.rs | 3 +- polkadot/xcm/xcm-simulator/Cargo.toml | 14 +- polkadot/xcm/xcm-simulator/example/Cargo.toml | 21 +- .../xcm/xcm-simulator/example/src/tests.rs | 4 - polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml | 21 +- polkadot/zombienet-sdk-tests/Cargo.toml | 8 +- polkadot/zombienet-sdk-tests/build.rs | 65 +- .../tests/elastic_scaling/helpers.rs | 60 - .../tests/elastic_scaling/mod.rs | 8 - .../elastic_scaling/slot_based_3cores.rs | 166 - polkadot/zombienet-sdk-tests/tests/lib.rs | 3 - .../tests/smoke/coretime_revenue.rs | 4 +- .../zombienet-sdk-tests/tests/smoke/mod.rs | 1 + ...astic-scaling-doesnt-break-parachains.toml | 2 +- 
...stic-scaling-doesnt-break-parachains.zndsl | 2 +- .../0018-shared-core-idle-parachain.toml | 2 +- ...-coretime-collation-fetching-fairness.toml | 58 - ...coretime-collation-fetching-fairness.zndsl | 16 - .../functional/0019-verify-included-events.js | 51 - prdoc/{stable2412 => }/pr_3151.prdoc | 0 prdoc/{stable2412 => }/pr_3685.prdoc | 0 prdoc/{stable2412 => }/pr_3881.prdoc | 0 prdoc/{stable2412 => }/pr_3970.prdoc | 0 prdoc/{stable2412 => }/pr_4012.prdoc | 0 prdoc/{stable2412 => }/pr_4251.prdoc | 0 prdoc/{stable2412 => }/pr_4257.prdoc | 0 prdoc/pr_4273.prdoc | 19 - prdoc/{stable2412 => }/pr_4639.prdoc | 0 prdoc/{stable2412 => }/pr_4826.prdoc | 0 prdoc/{stable2412 => }/pr_4837.prdoc | 0 prdoc/{stable2412 => }/pr_4846.prdoc | 0 prdoc/{stable2412 => }/pr_4849.prdoc | 0 prdoc/{stable2412 => }/pr_4851.prdoc | 0 prdoc/pr_4880.prdoc | 31 - prdoc/{stable2412 => }/pr_4889.prdoc | 0 prdoc/{stable2412 => }/pr_4974.prdoc | 0 prdoc/{stable2412 => }/pr_4982.prdoc | 0 prdoc/{stable2412 => }/pr_5038.prdoc | 0 prdoc/{stable2412 => }/pr_5194.prdoc | 0 prdoc/{stable2412 => }/pr_5198.prdoc | 0 prdoc/{stable2412 => }/pr_5201.prdoc | 0 prdoc/{stable2412 => }/pr_5274.prdoc | 0 prdoc/{stable2412 => }/pr_5322.prdoc | 0 prdoc/{stable2412 => }/pr_5343.prdoc | 0 prdoc/pr_5363.prdoc | 14 - prdoc/{stable2412 => }/pr_5372.prdoc | 0 prdoc/{stable2412 => }/pr_5390.prdoc | 0 prdoc/{stable2412 => }/pr_5420.prdoc | 0 prdoc/{stable2412 => }/pr_5423.prdoc | 0 prdoc/{stable2412 => }/pr_5435.prdoc | 0 prdoc/{stable2412 => }/pr_5461.prdoc | 0 prdoc/{stable2412 => }/pr_5469.prdoc | 0 prdoc/{stable2412 => }/pr_5502.prdoc | 0 prdoc/{stable2412 => }/pr_5515.prdoc | 0 prdoc/{stable2412 => }/pr_5521.prdoc | 0 prdoc/{stable2412 => }/pr_5526.prdoc | 0 prdoc/{stable2412 => }/pr_5540.prdoc | 0 prdoc/{stable2412 => }/pr_5548.prdoc | 0 prdoc/{stable2412 => }/pr_5554.prdoc | 0 prdoc/{stable2412 => }/pr_5555.prdoc | 0 prdoc/{stable2412 => }/pr_5556.prdoc | 0 prdoc/{stable2412 => }/pr_5572.prdoc | 0 prdoc/{stable2412 
=> }/pr_5585.prdoc | 0 prdoc/{stable2412 => }/pr_5592.prdoc | 0 prdoc/{stable2412 => }/pr_5601.prdoc | 0 prdoc/{stable2412 => }/pr_5606.prdoc | 0 prdoc/{stable2412 => }/pr_5608.prdoc | 0 prdoc/{stable2412 => }/pr_5609.prdoc | 0 prdoc/{stable2412 => }/pr_5616.prdoc | 0 prdoc/{stable2412 => }/pr_5623.prdoc | 0 prdoc/{stable2412 => }/pr_5630.prdoc | 0 prdoc/{stable2412 => }/pr_5635.prdoc | 0 prdoc/{stable2412 => }/pr_5640.prdoc | 0 prdoc/pr_5656.prdoc | 18 - prdoc/{stable2412 => }/pr_5664.prdoc | 0 prdoc/{stable2412 => }/pr_5665.prdoc | 0 prdoc/{stable2412 => }/pr_5666.prdoc | 0 prdoc/{stable2412 => }/pr_5675.prdoc | 0 prdoc/{stable2412 => }/pr_5676.prdoc | 0 prdoc/{stable2412 => }/pr_5679.prdoc | 0 prdoc/{stable2412 => }/pr_5682.prdoc | 0 prdoc/{stable2412 => }/pr_5684.prdoc | 0 prdoc/{stable2412 => }/pr_5686.prdoc | 0 prdoc/{stable2412 => }/pr_5687.prdoc | 0 prdoc/{stable2412 => }/pr_5693.prdoc | 0 prdoc/{stable2412 => }/pr_5701.prdoc | 0 prdoc/pr_5703.prdoc | 13 - prdoc/{stable2412 => }/pr_5707.prdoc | 0 prdoc/{stable2412 => }/pr_5716.prdoc | 0 prdoc/pr_5723.prdoc | 24 - prdoc/pr_5724.prdoc | 37 - prdoc/{stable2412 => }/pr_5726.prdoc | 0 prdoc/{stable2412 => }/pr_5737.prdoc | 0 prdoc/{stable2412 => }/pr_5741.prdoc | 0 prdoc/{stable2412 => }/pr_5743.prdoc | 0 prdoc/{stable2412 => }/pr_5745.prdoc | 0 prdoc/{stable2412 => }/pr_5756.prdoc | 0 prdoc/{stable2412 => }/pr_5762.prdoc | 0 prdoc/{stable2412 => }/pr_5765.prdoc | 0 prdoc/{stable2412 => }/pr_5768.prdoc | 0 prdoc/{stable2412 => }/pr_5774.prdoc | 0 prdoc/{stable2412 => }/pr_5779.prdoc | 0 prdoc/{stable2412 => }/pr_5787.prdoc | 0 prdoc/{stable2412 => }/pr_5789.prdoc | 0 prdoc/{stable2412 => }/pr_5796.prdoc | 0 prdoc/{stable2412 => }/pr_5804.prdoc | 0 prdoc/{stable2412 => }/pr_5807.prdoc | 0 prdoc/{stable2412 => }/pr_5811.prdoc | 0 prdoc/{stable2412 => }/pr_5813.prdoc | 0 prdoc/{stable2412 => }/pr_5824.prdoc | 0 prdoc/{stable2412 => }/pr_5830.prdoc | 0 prdoc/{stable2412 => }/pr_5838.prdoc | 0 prdoc/{stable2412 => 
}/pr_5839.prdoc | 0 prdoc/pr_5842.prdoc | 18 - prdoc/{stable2412 => }/pr_5845.prdoc | 0 prdoc/{stable2412 => }/pr_5847.prdoc | 0 prdoc/pr_5855.prdoc | 15 - prdoc/{stable2412 => }/pr_5856.prdoc | 0 prdoc/{stable2412 => }/pr_5857.prdoc | 0 prdoc/{stable2412 => }/pr_5859.prdoc | 0 prdoc/{stable2412 => }/pr_5861.prdoc | 0 prdoc/{stable2412 => }/pr_5866.prdoc | 0 prdoc/{stable2412 => }/pr_5872.prdoc | 0 prdoc/{stable2412 => }/pr_5875.prdoc | 0 prdoc/{stable2412 => }/pr_5876.prdoc | 0 prdoc/{stable2412 => }/pr_5880.prdoc | 0 prdoc/{stable2412 => }/pr_5883.prdoc | 0 prdoc/{stable2412 => }/pr_5886.prdoc | 0 prdoc/{stable2412 => }/pr_5888.prdoc | 0 prdoc/{stable2412 => }/pr_5891.prdoc | 0 prdoc/{stable2412 => }/pr_5892.prdoc | 0 prdoc/pr_5899.prdoc | 52 - prdoc/{stable2412 => }/pr_5901.prdoc | 0 prdoc/{stable2412 => }/pr_5908.prdoc | 0 prdoc/{stable2412 => }/pr_5911.prdoc | 0 prdoc/{stable2412 => }/pr_5915.prdoc | 0 prdoc/{stable2412 => }/pr_5917.prdoc | 0 prdoc/{stable2412 => }/pr_5919.prdoc | 0 prdoc/{stable2412 => }/pr_5924.prdoc | 0 prdoc/{stable2412 => }/pr_5939.prdoc | 0 prdoc/{stable2412 => }/pr_5941.prdoc | 0 prdoc/{stable2412 => }/pr_5946.prdoc | 0 prdoc/{stable2412 => }/pr_5954.prdoc | 0 prdoc/{stable2412 => }/pr_5961.prdoc | 0 prdoc/{stable2412 => }/pr_5971.prdoc | 0 prdoc/{stable2412 => }/pr_5984.prdoc | 0 prdoc/{stable2412 => }/pr_5994.prdoc | 0 prdoc/{stable2412 => }/pr_5995.prdoc | 0 prdoc/{stable2412 => }/pr_5998.prdoc | 0 prdoc/{stable2412 => }/pr_5999.prdoc | 0 prdoc/{stable2412 => }/pr_6011.prdoc | 0 prdoc/{stable2412 => }/pr_6015.prdoc | 0 prdoc/{stable2412 => }/pr_6016.prdoc | 0 prdoc/{stable2412 => }/pr_6022.prdoc | 0 prdoc/{stable2412 => }/pr_6023.prdoc | 0 prdoc/{stable2412 => }/pr_6025.prdoc | 0 prdoc/{stable2412 => }/pr_6027.prdoc | 0 prdoc/{stable2412 => }/pr_6032.prdoc | 0 prdoc/{stable2412 => }/pr_6039.prdoc | 0 prdoc/{stable2412 => }/pr_6045.prdoc | 0 prdoc/{stable2412 => }/pr_6058.prdoc | 0 prdoc/{stable2412 => }/pr_6061.prdoc | 0 
prdoc/{stable2412 => }/pr_6073.prdoc | 0 prdoc/{stable2412 => }/pr_6077.prdoc | 0 prdoc/{stable2412 => }/pr_6080.prdoc | 0 prdoc/{stable2412 => }/pr_6087.prdoc | 0 prdoc/{stable2412 => }/pr_6088.prdoc | 0 prdoc/{stable2412 => }/pr_6094.prdoc | 0 prdoc/{stable2412 => }/pr_6096.prdoc | 0 prdoc/{stable2412 => }/pr_6104.prdoc | 0 prdoc/{stable2412 => }/pr_6105.prdoc | 0 prdoc/pr_6111.prdoc | 17 - prdoc/{stable2412 => }/pr_6129.prdoc | 0 prdoc/{stable2412 => }/pr_6141.prdoc | 0 prdoc/{stable2412 => }/pr_6147.prdoc | 0 prdoc/{stable2412 => }/pr_6148.prdoc | 0 prdoc/{stable2412 => }/pr_6156.prdoc | 0 prdoc/{stable2412 => }/pr_6169.prdoc | 0 prdoc/{stable2412 => }/pr_6171.prdoc | 0 prdoc/{stable2412 => }/pr_6174.prdoc | 0 prdoc/pr_6184.prdoc | 24 - prdoc/{stable2412 => }/pr_6187.prdoc | 0 prdoc/{stable2412 => }/pr_6192.prdoc | 0 prdoc/{stable2412 => }/pr_6205.prdoc | 0 prdoc/{stable2412 => }/pr_6212.prdoc | 0 prdoc/{stable2412 => }/pr_6214.prdoc | 0 prdoc/pr_6215.prdoc | 16 - prdoc/{stable2412 => }/pr_6217.prdoc | 0 prdoc/{stable2412 => }/pr_6218.prdoc | 0 prdoc/pr_6220.prdoc | 10 - prdoc/{stable2412 => }/pr_6221.prdoc | 0 prdoc/{stable2412 => }/pr_6228.prdoc | 0 prdoc/{stable2412 => }/pr_6246.prdoc | 0 prdoc/pr_6248.prdoc | 16 - prdoc/pr_6249.prdoc | 10 - prdoc/{stable2412 => }/pr_6255.prdoc | 0 prdoc/{stable2412 => }/pr_6257.prdoc | 0 prdoc/{stable2412 => }/pr_6260.prdoc | 0 prdoc/{stable2412 => }/pr_6261.prdoc | 0 prdoc/pr_6262.prdoc | 10 - prdoc/{stable2412 => }/pr_6263.prdoc | 0 prdoc/{stable2412 => }/pr_6264.prdoc | 0 prdoc/{stable2412 => }/pr_6268.prdoc | 0 prdoc/{stable2412 => }/pr_6278.prdoc | 0 prdoc/pr_6284.prdoc | 22 - prdoc/{stable2412 => }/pr_6288.prdoc | 0 prdoc/pr_6290.prdoc | 11 - prdoc/{stable2412 => }/pr_6291.prdoc | 0 prdoc/{stable2412 => }/pr_6295.prdoc | 0 prdoc/{stable2412 => }/pr_6296.prdoc | 0 prdoc/{stable2412 => }/pr_6298.prdoc | 0 prdoc/{stable2412 => }/pr_6299.prdoc | 0 prdoc/pr_6301.prdoc | 11 - prdoc/pr_6302.prdoc | 8 - prdoc/{stable2412 => 
}/pr_6305.prdoc | 0 prdoc/pr_6310.prdoc | 12 - prdoc/pr_6311.prdoc | 13 - prdoc/{stable2412 => }/pr_6314.prdoc | 0 prdoc/{stable2412 => }/pr_6315.prdoc | 0 prdoc/{stable2412 => }/pr_6316.prdoc | 0 prdoc/{stable2412 => }/pr_6317.prdoc | 0 prdoc/{stable2412 => }/pr_6318.prdoc | 0 prdoc/{stable2412 => }/pr_6337.prdoc | 0 prdoc/pr_6349.prdoc | 44 - prdoc/{stable2412 => }/pr_6353.prdoc | 0 prdoc/{stable2412 => }/pr_6357.prdoc | 0 prdoc/{stable2412 => }/pr_6360.prdoc | 0 prdoc/{stable2412 => }/pr_6365.prdoc | 0 prdoc/pr_6367.prdoc | 14 - prdoc/pr_6368.prdoc | 7 - prdoc/{stable2412 => }/pr_6373.prdoc | 0 prdoc/{stable2412 => }/pr_6380.prdoc | 0 prdoc/{stable2412 => }/pr_6382.prdoc | 0 prdoc/{stable2412 => }/pr_6384.prdoc | 0 prdoc/pr_6393.prdoc | 16 - prdoc/pr_6400.prdoc | 41 - prdoc/pr_6405.prdoc | 9 - prdoc/{stable2412 => }/pr_6406.prdoc | 0 prdoc/pr_6411.prdoc | 10 - prdoc/pr_6417.prdoc | 9 - prdoc/pr_6419.prdoc | 12 - prdoc/pr_6425.prdoc | 27 - prdoc/pr_6435.prdoc | 16 - prdoc/pr_6439.prdoc | 10 - prdoc/pr_6440.prdoc | 8 - prdoc/pr_6446.prdoc | 16 - prdoc/pr_6450.prdoc | 21 - prdoc/pr_6452.prdoc | 16 - prdoc/pr_6453.prdoc | 7 - prdoc/pr_6455.prdoc | 8 - prdoc/pr_6459.prdoc | 22 - prdoc/pr_6460.prdoc | 9 - prdoc/pr_6461.prdoc | 12 - prdoc/pr_6463.prdoc | 8 - prdoc/pr_6466.prdoc | 12 - prdoc/pr_6481.prdoc | 10 - prdoc/pr_6486.prdoc | 10 - prdoc/pr_6502.prdoc | 10 - prdoc/pr_6503.prdoc | 10 - prdoc/pr_6506.prdoc | 10 - prdoc/pr_6509.prdoc | 13 - prdoc/pr_6521.prdoc | 10 - prdoc/pr_6522.prdoc | 18 - prdoc/pr_6526.prdoc | 8 - prdoc/pr_6528.prdoc | 18 - prdoc/pr_6533.prdoc | 20 - prdoc/pr_6534.prdoc | 10 - prdoc/pr_6540.prdoc | 16 - prdoc/pr_6544.prdoc | 14 - prdoc/pr_6546.prdoc | 13 - prdoc/pr_6549.prdoc | 247 - prdoc/pr_6553.prdoc | 13 - prdoc/pr_6561.prdoc | 11 - prdoc/pr_6562.prdoc | 14 - prdoc/pr_6565.prdoc | 35 - prdoc/pr_6583.prdoc | 7 - prdoc/pr_6604.prdoc | 106 - prdoc/pr_6605.prdoc | 10 - prdoc/pr_6608.prdoc | 14 - prdoc/pr_6624.prdoc | 11 - prdoc/pr_6628.prdoc | 
12 - prdoc/pr_6636.prdoc | 9 - prdoc/pr_6665.prdoc | 15 - prdoc/pr_6673.prdoc | 7 - prdoc/pr_6681.prdoc | 406 - prdoc/pr_6695.prdoc | 8 - prdoc/pr_6703.prdoc | 7 - prdoc/pr_6711.prdoc | 13 - prdoc/pr_6728.prdoc | 12 - prdoc/pr_6741.prdoc | 16 - prdoc/pr_6743.prdoc | 10 - prdoc/pr_6759.prdoc | 16 - prdoc/pr_6768.prdoc | 14 - prdoc/pr_6792.prdoc | 11 - prdoc/pr_6796.prdoc | 9 - prdoc/pr_6832.prdoc | 13 - prdoc/pr_6835.prdoc | 12 - prdoc/pr_6844.prdoc | 8 - prdoc/pr_6857.prdoc | 14 - prdoc/pr_6865.prdoc | 9 - prdoc/pr_6866.prdoc | 13 - prdoc/pr_6880.prdoc | 14 - prdoc/pr_6889.prdoc | 13 - prdoc/pr_6896.prdoc | 16 - prdoc/pr_6908.prdoc | 12 - prdoc/pr_6917.prdoc | 14 - prdoc/pr_6920.prdoc | 14 - prdoc/pr_6923.prdoc | 12 - prdoc/pr_6926.prdoc | 13 - prdoc/pr_6928.prdoc | 34 - prdoc/pr_6937.prdoc | 12 - prdoc/pr_6954.prdoc | 13 - prdoc/pr_6963.prdoc | 10 - prdoc/pr_6964.prdoc | 15 - prdoc/pr_6979.prdoc | 8 - prdoc/pr_6981.prdoc | 7 - prdoc/pr_6986.prdoc | 18 - prdoc/pr_6989.prdoc | 10 - prdoc/pr_7005.prdoc | 7 - prdoc/pr_7011.prdoc | 16 - prdoc/pr_7013.prdoc | 7 - prdoc/pr_7020.prdoc | 18 - prdoc/pr_7021.prdoc | 8 - prdoc/pr_7028.prdoc | 25 - prdoc/stable2412/pr_4834.prdoc | 15 - prdoc/stable2412/pr_5311.prdoc | 16 - prdoc/stable2412/pr_5732.prdoc | 29 - prdoc/stable2412/pr_5997.prdoc | 18 - prdoc/stable2412/pr_6304.prdoc | 45 - prdoc/stable2412/pr_6323.prdoc | 32 - prdoc/stable2412/pr_6418.prdoc | 151 - prdoc/stable2412/pr_6454.prdoc | 7 - prdoc/stable2412/pr_6484.prdoc | 10 - prdoc/stable2412/pr_6505.prdoc | 14 - prdoc/stable2412/pr_6536.prdoc | 24 - prdoc/stable2412/pr_6566.prdoc | 45 - prdoc/stable2412/pr_6588.prdoc | 14 - prdoc/stable2412/pr_6603.prdoc | 16 - prdoc/stable2412/pr_6643.prdoc | 47 - prdoc/stable2412/pr_6645.prdoc | 14 - prdoc/stable2412/pr_6646.prdoc | 19 - prdoc/stable2412/pr_6652.prdoc | 13 - prdoc/stable2412/pr_6677.prdoc | 11 - prdoc/stable2412/pr_6690.prdoc | 17 - prdoc/stable2412/pr_6696.prdoc | 15 - prdoc/stable2412/pr_6729.prdoc | 15 - 
prdoc/stable2412/pr_6742.prdoc | 11 - prdoc/stable2412/pr_6760.prdoc | 9 - prdoc/stable2412/pr_6781.prdoc | 28 - prdoc/stable2412/pr_6814.prdoc | 32 - prdoc/stable2412/pr_6860.prdoc | 10 - prdoc/stable2412/pr_6863.prdoc | 9 - prdoc/stable2412/pr_6864.prdoc | 18 - prdoc/stable2412/pr_6885.prdoc | 11 - scripts/generate-umbrella.py | 2 - scripts/release/templates/audience.md.tera | 2 +- substrate/.config/nextest.toml | 124 + .../frame-umbrella-weight-template.hbs | 2 +- substrate/bin/node/bench/Cargo.toml | 33 +- substrate/bin/node/bench/src/construct.rs | 48 +- substrate/bin/node/cli/Cargo.toml | 26 +- substrate/bin/node/cli/src/chain_spec.rs | 2 +- substrate/bin/node/cli/src/service.rs | 9 +- substrate/bin/node/inspect/Cargo.toml | 2 +- substrate/bin/node/rpc/Cargo.toml | 6 +- substrate/bin/node/runtime/Cargo.toml | 4 +- substrate/bin/node/runtime/src/lib.rs | 40 +- substrate/bin/node/testing/Cargo.toml | 14 +- substrate/bin/node/testing/src/bench.rs | 5 +- substrate/bin/node/testing/src/keyring.rs | 5 +- .../bin/utils/chain-spec-builder/Cargo.toml | 4 +- substrate/client/allocator/Cargo.toml | 2 +- substrate/client/api/Cargo.toml | 2 +- .../client/authority-discovery/Cargo.toml | 9 +- .../client/authority-discovery/src/tests.rs | 2 +- .../client/authority-discovery/src/worker.rs | 9 +- .../src/worker/schema/tests.rs | 14 +- .../authority-discovery/src/worker/tests.rs | 24 +- .../basic-authorship/src/basic_authorship.rs | 22 +- substrate/client/basic-authorship/src/lib.rs | 2 +- substrate/client/block-builder/Cargo.toml | 2 +- substrate/client/chain-spec/Cargo.toml | 20 +- substrate/client/chain-spec/src/chain_spec.rs | 14 +- substrate/client/cli/Cargo.toml | 6 +- .../client/cli/src/params/import_params.rs | 10 +- .../client/cli/src/params/shared_params.rs | 18 +- substrate/client/consensus/aura/Cargo.toml | 4 +- substrate/client/consensus/babe/Cargo.toml | 6 +- .../client/consensus/babe/rpc/Cargo.toml | 10 +- substrate/client/consensus/beefy/Cargo.toml | 8 +- 
.../client/consensus/beefy/rpc/Cargo.toml | 10 +- substrate/client/consensus/common/Cargo.toml | 4 +- substrate/client/consensus/grandpa/Cargo.toml | 20 +- .../client/consensus/grandpa/rpc/Cargo.toml | 8 +- .../consensus/grandpa/src/warp_proof.rs | 16 +- .../client/consensus/manual-seal/Cargo.toml | 8 +- .../client/consensus/manual-seal/src/lib.rs | 2 +- substrate/client/consensus/pow/Cargo.toml | 2 +- substrate/client/db/Cargo.toml | 8 +- substrate/client/db/src/lib.rs | 104 +- substrate/client/executor/Cargo.toml | 20 +- substrate/client/executor/common/Cargo.toml | 6 +- substrate/client/executor/common/src/error.rs | 4 +- .../common/src/runtime_blob/runtime_blob.rs | 13 +- substrate/client/executor/polkavm/src/lib.rs | 206 +- substrate/client/executor/wasmtime/Cargo.toml | 20 +- substrate/client/informant/Cargo.toml | 2 +- substrate/client/keystore/Cargo.toml | 2 +- .../client/merkle-mountain-range/Cargo.toml | 6 +- substrate/client/network-gossip/Cargo.toml | 6 +- substrate/client/network-gossip/src/bridge.rs | 30 +- substrate/client/network-gossip/src/lib.rs | 13 +- substrate/client/network/Cargo.toml | 36 +- .../network/benches/notifications_protocol.rs | 394 +- .../benches/request_response_protocol.rs | 361 +- substrate/client/network/light/Cargo.toml | 6 +- substrate/client/network/src/behaviour.rs | 38 +- substrate/client/network/src/discovery.rs | 258 +- substrate/client/network/src/event.rs | 17 +- .../client/network/src/litep2p/discovery.rs | 45 +- substrate/client/network/src/litep2p/mod.rs | 195 +- .../client/network/src/litep2p/service.rs | 32 +- .../src/litep2p/shim/request_response/mod.rs | 7 +- substrate/client/network/src/network_state.rs | 2 +- substrate/client/network/src/peer_info.rs | 91 +- substrate/client/network/src/protocol.rs | 48 +- .../network/src/protocol/notifications.rs | 2 +- .../src/protocol/notifications/behaviour.rs | 102 +- .../src/protocol/notifications/handler.rs | 123 +- .../src/protocol/notifications/tests.rs | 324 +- 
.../notifications/upgrade/notifications.rs | 30 +- .../client/network/src/protocol_controller.rs | 2 +- .../client/network/src/request_responses.rs | 193 +- substrate/client/network/src/service.rs | 122 +- .../client/network/src/service/traits.rs | 30 +- substrate/client/network/src/transport.rs | 27 +- substrate/client/network/src/types.rs | 2 + substrate/client/network/statement/Cargo.toml | 2 +- substrate/client/network/statement/src/lib.rs | 23 +- substrate/client/network/sync/Cargo.toml | 16 +- substrate/client/network/sync/src/engine.rs | 24 +- .../network/sync/src/strategy/state_sync.rs | 291 +- substrate/client/network/sync/src/types.rs | 4 - substrate/client/network/test/Cargo.toml | 8 +- substrate/client/network/test/src/lib.rs | 6 +- .../client/network/transactions/Cargo.toml | 2 +- .../client/network/transactions/src/lib.rs | 25 +- substrate/client/network/types/Cargo.toml | 2 - substrate/client/network/types/src/kad.rs | 185 - substrate/client/network/types/src/lib.rs | 2 +- substrate/client/offchain/Cargo.toml | 12 +- substrate/client/proposer-metrics/src/lib.rs | 2 +- substrate/client/rpc-api/Cargo.toml | 10 +- substrate/client/rpc-servers/src/lib.rs | 102 +- substrate/client/rpc-servers/src/utils.rs | 23 +- substrate/client/rpc-spec-v2/Cargo.toml | 56 +- .../client/rpc-spec-v2/src/archive/api.rs | 31 +- .../client/rpc-spec-v2/src/archive/archive.rs | 251 +- .../src/archive/archive_storage.rs | 880 +- .../client/rpc-spec-v2/src/archive/mod.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 721 +- .../rpc-spec-v2/src/chain_head/chain_head.rs | 10 +- .../rpc-spec-v2/src/chain_head/event.rs | 3 +- .../src/chain_head/subscription/inner.rs | 6 +- .../rpc-spec-v2/src/chain_head/tests.rs | 59 +- .../client/rpc-spec-v2/src/common/events.rs | 267 +- .../client/rpc-spec-v2/src/common/storage.rs | 144 +- .../src/transaction/tests/middleware_pool.rs | 96 +- .../tests/transaction_broadcast_tests.rs | 2 +- .../transaction/tests/transaction_tests.rs | 2 +- 
substrate/client/rpc/Cargo.toml | 8 +- substrate/client/rpc/src/author/mod.rs | 17 +- substrate/client/rpc/src/author/tests.rs | 20 +- substrate/client/rpc/src/state/tests.rs | 12 +- substrate/client/runtime-utilities/Cargo.toml | 36 - .../client/runtime-utilities/src/error.rs | 35 - substrate/client/service/Cargo.toml | 88 +- substrate/client/service/src/builder.rs | 118 +- substrate/client/service/src/client/client.rs | 40 +- substrate/client/service/src/client/mod.rs | 3 +- substrate/client/service/src/lib.rs | 29 +- substrate/client/service/test/Cargo.toml | 12 +- .../client/service/test/src/client/mod.rs | 116 +- substrate/client/statement-store/Cargo.toml | 12 +- substrate/client/storage-monitor/Cargo.toml | 4 +- substrate/client/sync-state-rpc/Cargo.toml | 6 +- substrate/client/sysinfo/Cargo.toml | 4 +- substrate/client/telemetry/Cargo.toml | 6 +- substrate/client/telemetry/src/node.rs | 13 +- substrate/client/tracing/Cargo.toml | 20 +- .../client/tracing/src/logging/directives.rs | 7 +- substrate/client/transaction-pool/Cargo.toml | 6 +- .../client/transaction-pool/api/Cargo.toml | 2 +- .../client/transaction-pool/api/src/lib.rs | 55 +- .../client/transaction-pool/benches/basics.rs | 4 +- .../src/fork_aware_txpool/dropped_watcher.rs | 398 +- .../fork_aware_txpool/fork_aware_txpool.rs | 431 +- .../import_notification_sink.rs | 19 +- .../src/fork_aware_txpool/mod.rs | 12 +- .../fork_aware_txpool/multi_view_listener.rs | 38 +- .../fork_aware_txpool/revalidation_worker.rs | 11 +- .../src/fork_aware_txpool/tx_mem_pool.rs | 241 +- .../src/fork_aware_txpool/view.rs | 31 +- .../src/fork_aware_txpool/view_store.rs | 279 +- .../transaction-pool/src/graph/base_pool.rs | 159 +- .../transaction-pool/src/graph/listener.rs | 47 +- .../client/transaction-pool/src/graph/mod.rs | 2 +- .../client/transaction-pool/src/graph/pool.rs | 32 +- .../transaction-pool/src/graph/ready.rs | 5 +- .../transaction-pool/src/graph/rotator.rs | 5 +- 
.../transaction-pool/src/graph/tracked_map.rs | 28 +- .../src/graph/validated_pool.rs | 27 +- .../transaction-pool/src/graph/watcher.rs | 6 - substrate/client/transaction-pool/src/lib.rs | 9 +- .../src/single_state_txpool/revalidation.rs | 27 +- .../single_state_txpool.rs | 133 +- .../src/transaction_pool_wrapper.rs | 52 +- .../client/transaction-pool/tests/fatp.rs | 16 +- .../transaction-pool/tests/fatp_common/mod.rs | 20 +- .../transaction-pool/tests/fatp_limits.rs | 655 +- .../transaction-pool/tests/fatp_prios.rs | 249 - .../client/transaction-pool/tests/pool.rs | 30 +- substrate/docs/Upgrading-2.0-to-3.0.md | 4 +- substrate/frame/Cargo.toml | 16 +- substrate/frame/alliance/Cargo.toml | 4 +- substrate/frame/alliance/src/weights.rs | 420 +- substrate/frame/asset-conversion/Cargo.toml | 8 +- .../frame/asset-conversion/ops/Cargo.toml | 8 +- .../frame/asset-conversion/ops/src/weights.rs | 28 +- .../frame/asset-conversion/src/weights.rs | 107 +- substrate/frame/asset-rate/Cargo.toml | 6 +- substrate/frame/asset-rate/src/weights.rs | 60 +- substrate/frame/assets-freezer/Cargo.toml | 8 +- substrate/frame/assets/Cargo.toml | 4 +- substrate/frame/assets/src/lib.rs | 12 +- substrate/frame/assets/src/weights.rs | 362 +- substrate/frame/atomic-swap/Cargo.toml | 16 +- substrate/frame/atomic-swap/src/lib.rs | 16 +- substrate/frame/atomic-swap/src/tests.rs | 8 +- substrate/frame/aura/Cargo.toml | 4 +- .../frame/authority-discovery/Cargo.toml | 2 +- substrate/frame/authorship/Cargo.toml | 4 +- substrate/frame/authorship/src/lib.rs | 1 - substrate/frame/babe/Cargo.toml | 4 +- substrate/frame/babe/src/benchmarking.rs | 27 +- substrate/frame/babe/src/mock.rs | 2 +- substrate/frame/babe/src/tests.rs | 2 +- substrate/frame/bags-list/Cargo.toml | 12 +- substrate/frame/bags-list/fuzzer/Cargo.toml | 4 +- .../frame/bags-list/remote-tests/Cargo.toml | 10 +- substrate/frame/bags-list/src/weights.rs | 40 +- substrate/frame/balances/Cargo.toml | 10 +- substrate/frame/balances/src/lib.rs | 4 
+- .../balances/src/tests/currency_tests.rs | 4 - substrate/frame/balances/src/weights.rs | 152 +- substrate/frame/beefy-mmr/Cargo.toml | 10 +- substrate/frame/beefy-mmr/src/weights.rs | 60 +- substrate/frame/beefy/Cargo.toml | 6 +- substrate/frame/beefy/src/mock.rs | 2 +- substrate/frame/beefy/src/tests.rs | 4 +- substrate/frame/benchmarking/Cargo.toml | 12 +- substrate/frame/benchmarking/pov/Cargo.toml | 2 +- .../frame/benchmarking/src/tests_instance.rs | 61 +- substrate/frame/benchmarking/src/utils.rs | 3 - substrate/frame/benchmarking/src/v1.rs | 3 +- substrate/frame/benchmarking/src/weights.rs | 60 +- substrate/frame/bounties/Cargo.toml | 4 +- substrate/frame/bounties/src/benchmarking.rs | 30 +- substrate/frame/bounties/src/lib.rs | 20 +- substrate/frame/bounties/src/weights.rs | 176 +- substrate/frame/broker/Cargo.toml | 12 +- substrate/frame/broker/src/benchmarking.rs | 236 +- .../frame/broker/src/dispatchable_impls.rs | 31 +- substrate/frame/broker/src/lib.rs | 37 +- substrate/frame/broker/src/migration.rs | 252 - substrate/frame/broker/src/tests.rs | 303 - substrate/frame/broker/src/tick_impls.rs | 4 +- substrate/frame/broker/src/types.rs | 24 +- substrate/frame/broker/src/utility_impls.rs | 3 +- substrate/frame/broker/src/weights.rs | 521 +- substrate/frame/child-bounties/Cargo.toml | 4 +- .../frame/child-bounties/src/benchmarking.rs | 236 +- substrate/frame/child-bounties/src/lib.rs | 8 +- substrate/frame/child-bounties/src/weights.rs | 148 +- substrate/frame/collective/Cargo.toml | 4 +- substrate/frame/collective/src/weights.rs | 396 +- substrate/frame/contracts/Cargo.toml | 21 +- substrate/frame/contracts/fixtures/Cargo.toml | 4 +- .../frame/contracts/fixtures/build/Cargo.toml | 2 +- .../frame/contracts/mock-network/Cargo.toml | 5 +- substrate/frame/contracts/src/tests.rs | 1 - substrate/frame/contracts/src/weights.rs | 982 +- substrate/frame/contracts/uapi/Cargo.toml | 6 +- substrate/frame/conviction-voting/Cargo.toml | 4 +- 
substrate/frame/conviction-voting/src/lib.rs | 7 +- .../frame/conviction-voting/src/tests.rs | 85 +- .../frame/conviction-voting/src/types.rs | 9 +- .../frame/conviction-voting/src/weights.rs | 76 +- substrate/frame/core-fellowship/Cargo.toml | 6 +- .../core-fellowship/src/tests/integration.rs | 47 +- .../frame/core-fellowship/src/weights.rs | 120 +- substrate/frame/delegated-staking/Cargo.toml | 18 +- substrate/frame/democracy/Cargo.toml | 10 +- substrate/frame/democracy/src/benchmarking.rs | 548 +- substrate/frame/democracy/src/weights.rs | 316 +- .../election-provider-multi-phase/Cargo.toml | 14 +- .../src/weights.rs | 152 +- .../test-staking-e2e/Cargo.toml | 24 +- .../test-staking-e2e/src/lib.rs | 29 +- .../test-staking-e2e/src/mock.rs | 13 +- .../election-provider-support/Cargo.toml | 4 +- .../solution-type/Cargo.toml | 6 +- .../solution-type/fuzzer/Cargo.toml | 4 +- substrate/frame/elections-phragmen/Cargo.toml | 4 +- .../frame/elections-phragmen/src/weights.rs | 224 +- substrate/frame/examples/Cargo.toml | 4 +- .../src/extensions.rs | 3 +- .../authorization-tx-extension/src/tests.rs | 29 +- substrate/frame/examples/basic/Cargo.toml | 4 +- substrate/frame/examples/basic/src/lib.rs | 2 - substrate/frame/examples/basic/src/tests.rs | 5 +- .../frame/examples/default-config/Cargo.toml | 4 +- .../frame/examples/default-config/src/lib.rs | 4 +- substrate/frame/examples/dev-mode/Cargo.toml | 4 +- .../multi-block-migrations/Cargo.toml | 4 +- .../frame/examples/offchain-worker/Cargo.toml | 4 +- .../examples/offchain-worker/src/tests.rs | 2 +- .../single-block-migrations/Cargo.toml | 10 +- substrate/frame/examples/tasks/Cargo.toml | 2 +- substrate/frame/executive/Cargo.toml | 4 +- substrate/frame/fast-unstake/Cargo.toml | 10 +- .../frame/fast-unstake/src/benchmarking.rs | 116 +- substrate/frame/fast-unstake/src/weights.rs | 104 +- substrate/frame/glutton/Cargo.toml | 6 +- substrate/frame/glutton/src/weights.rs | 132 +- substrate/frame/grandpa/Cargo.toml | 4 +- 
substrate/frame/grandpa/src/mock.rs | 2 +- substrate/frame/grandpa/src/tests.rs | 2 +- substrate/frame/identity/Cargo.toml | 4 +- substrate/frame/identity/src/weights.rs | 848 +- substrate/frame/im-online/Cargo.toml | 4 +- substrate/frame/im-online/src/weights.rs | 20 +- substrate/frame/indices/Cargo.toml | 2 +- substrate/frame/indices/src/benchmarking.rs | 70 +- substrate/frame/indices/src/weights.rs | 48 +- .../Cargo.toml | 4 +- substrate/frame/lottery/Cargo.toml | 2 +- substrate/frame/lottery/src/weights.rs | 76 +- substrate/frame/membership/Cargo.toml | 4 +- .../frame/membership/src/benchmarking.rs | 5 +- substrate/frame/membership/src/weights.rs | 108 +- .../frame/merkle-mountain-range/Cargo.toml | 6 +- substrate/frame/message-queue/Cargo.toml | 10 +- substrate/frame/message-queue/src/weights.rs | 84 +- .../frame/metadata-hash-extension/Cargo.toml | 18 +- substrate/frame/migrations/Cargo.toml | 2 +- substrate/frame/migrations/src/weights.rs | 156 +- substrate/frame/mixnet/Cargo.toml | 24 +- substrate/frame/mixnet/src/lib.rs | 60 +- substrate/frame/multisig/Cargo.toml | 2 +- substrate/frame/multisig/src/lib.rs | 10 +- substrate/frame/multisig/src/tests.rs | 1 - substrate/frame/multisig/src/weights.rs | 2 +- .../frame/nft-fractionalization/Cargo.toml | 4 +- .../nft-fractionalization/src/benchmarking.rs | 70 +- .../frame/nft-fractionalization/src/mock.rs | 1 - .../nft-fractionalization/src/weights.rs | 40 +- substrate/frame/nfts/Cargo.toml | 4 +- substrate/frame/nfts/src/benchmarking.rs | 22 +- .../frame/nfts/src/features/approvals.rs | 6 +- .../frame/nfts/src/features/atomic_swap.rs | 8 +- .../frame/nfts/src/features/attributes.rs | 2 +- .../nfts/src/features/create_delete_item.rs | 2 +- substrate/frame/nfts/src/features/settings.rs | 6 +- substrate/frame/nfts/src/lib.rs | 29 +- substrate/frame/nfts/src/mock.rs | 1 - substrate/frame/nfts/src/types.rs | 12 +- substrate/frame/nfts/src/weights.rs | 372 +- substrate/frame/nis/Cargo.toml | 2 +- 
substrate/frame/nis/src/weights.rs | 144 +- substrate/frame/node-authorization/Cargo.toml | 4 +- substrate/frame/nomination-pools/Cargo.toml | 6 +- .../nomination-pools/benchmarking/Cargo.toml | 4 +- .../benchmarking/src/inner.rs | 687 +- .../frame/nomination-pools/fuzzer/Cargo.toml | 6 +- .../nomination-pools/runtime-api/Cargo.toml | 2 +- .../nomination-pools/runtime-api/src/lib.rs | 3 - substrate/frame/nomination-pools/src/lib.rs | 23 +- .../frame/nomination-pools/src/weights.rs | 2 +- .../test-delegate-stake/Cargo.toml | 20 +- .../test-delegate-stake/src/lib.rs | 51 +- .../test-transfer-stake/Cargo.toml | 18 +- .../test-transfer-stake/src/lib.rs | 12 +- substrate/frame/offences/Cargo.toml | 6 +- .../frame/offences/benchmarking/Cargo.toml | 4 +- .../frame/offences/benchmarking/src/inner.rs | 107 +- .../frame/offences/benchmarking/src/mock.rs | 5 +- substrate/frame/paged-list/Cargo.toml | 2 +- substrate/frame/paged-list/fuzzer/Cargo.toml | 2 +- substrate/frame/parameters/Cargo.toml | 10 +- substrate/frame/parameters/src/weights.rs | 12 +- substrate/frame/preimage/Cargo.toml | 4 +- substrate/frame/preimage/src/benchmarking.rs | 296 +- substrate/frame/preimage/src/weights.rs | 224 +- substrate/frame/proxy/Cargo.toml | 2 +- substrate/frame/proxy/src/benchmarking.rs | 6 +- substrate/frame/proxy/src/lib.rs | 12 +- substrate/frame/proxy/src/tests.rs | 1 - substrate/frame/proxy/src/weights.rs | 2 +- substrate/frame/ranked-collective/Cargo.toml | 6 +- .../ranked-collective/src/benchmarking.rs | 273 +- .../frame/ranked-collective/src/weights.rs | 92 +- substrate/frame/recovery/Cargo.toml | 2 +- substrate/frame/recovery/src/lib.rs | 25 +- substrate/frame/recovery/src/mock.rs | 1 - substrate/frame/recovery/src/weights.rs | 148 +- substrate/frame/referenda/Cargo.toml | 9 +- substrate/frame/referenda/src/weights.rs | 248 +- substrate/frame/remark/Cargo.toml | 4 +- substrate/frame/remark/src/weights.rs | 16 +- substrate/frame/revive/Cargo.toml | 46 +- 
substrate/frame/revive/README.md | 2 +- substrate/frame/revive/fixtures/Cargo.toml | 18 +- substrate/frame/revive/fixtures/build.rs | 108 +- .../build/{_Cargo.toml => Cargo.toml} | 7 +- .../fixtures/build/_rust-toolchain.toml | 4 - .../revive/fixtures/contracts/base_fee.rs | 36 - .../fixtures/contracts/call_data_copy.rs | 53 - .../fixtures/contracts/call_data_load.rs | 44 - .../fixtures/contracts/call_data_size.rs | 34 - .../fixtures/contracts/caller_contract.rs | 8 +- .../fixtures/contracts/common/src/lib.rs | 5 +- .../contracts/create_storage_and_call.rs | 8 +- .../create_storage_and_instantiate.rs | 8 +- .../create_transient_storage_and_call.rs | 2 +- .../fixtures/contracts/delegate_call.rs | 8 +- .../contracts/delegate_call_deposit_limit.rs | 50 - .../contracts/delegate_call_simple.rs | 6 +- .../revive/fixtures/contracts/extcodesize.rs | 4 +- .../revive/fixtures/contracts/gas_limit.rs | 34 - .../revive/fixtures/contracts/gas_price.rs | 34 - .../contracts/locking_delegate_dependency.rs | 3 +- .../fixtures/contracts/ref_time_left.rs | 34 - .../fixtures/contracts/return_data_api.rs | 4 +- .../revive/fixtures/contracts/rpc_demo.rs | 8 +- .../fixtures/contracts/set_code_hash.rs | 2 +- .../fixtures/contracts/unknown_syscall.rs | 44 - .../fixtures/contracts/unstable_interface.rs | 44 - .../riscv32emac-unknown-none-polkavm.json | 26 + substrate/frame/revive/fixtures/src/lib.rs | 14 +- .../frame/revive/mock-network/Cargo.toml | 18 +- .../frame/revive/mock-network/src/tests.rs | 4 +- substrate/frame/revive/proc-macro/src/lib.rs | 172 +- substrate/frame/revive/rpc/Cargo.toml | 43 +- substrate/frame/revive/rpc/examples/README.md | 34 +- substrate/frame/revive/rpc/examples/bun.lockb | Bin 10962 -> 0 bytes .../revive/rpc/examples/js/.prettierrc.json | 6 - .../revive/rpc/examples/js/abi/Errors.json | 106 - .../revive/rpc/examples/js/abi/Errors.ts | 106 - .../rpc/examples/js/abi/EventExample.json | 34 - .../rpc/examples/js/abi/EventExample.ts | 34 - 
.../revive/rpc/examples/js/abi/Flipper.json | 35 - .../revive/rpc/examples/js/abi/Flipper.ts | 35 - .../rpc/examples/js/abi/FlipperCaller.json | 46 - .../rpc/examples/js/abi/FlipperCaller.ts | 46 - .../revive/rpc/examples/js/abi/PiggyBank.json | 65 - .../rpc/examples/js/abi/RevertExample.ts | 14 - .../revive/rpc/examples/js/abi/piggyBank.ts | 65 - .../frame/revive/rpc/examples/js/bun.lockb | Bin 40649 -> 23039 bytes .../rpc/examples/js/contracts/.solhint.json | 3 - .../rpc/examples/js/contracts/Errors.sol | 51 - .../rpc/examples/js/contracts/Event.sol | 13 - .../rpc/examples/js/contracts/Flipper.sol | 35 - .../rpc/examples/js/contracts/PiggyBank.sol | 32 - .../frame/revive/rpc/examples/js/evm/.gitkeep | 0 .../frame/revive/rpc/examples/js/index.html | 53 +- .../revive/rpc/examples/js/package-lock.json | 443 - .../frame/revive/rpc/examples/js/package.json | 37 +- .../revive/rpc/examples/js/pvm/Errors.polkavm | Bin 7274 -> 0 bytes .../rpc/examples/js/pvm/EventExample.polkavm | Bin 2615 -> 0 bytes .../rpc/examples/js/pvm/Flipper.polkavm | Bin 1738 -> 0 bytes .../rpc/examples/js/pvm/FlipperCaller.polkavm | Bin 4532 -> 0 bytes .../rpc/examples/js/pvm/PiggyBank.polkavm | Bin 5062 -> 0 bytes .../revive/rpc/examples/js/src/balance.ts | 8 - .../rpc/examples/js/src/build-contracts.ts | 96 - .../frame/revive/rpc/examples/js/src/event.ts | 29 - .../rpc/examples/js/src/geth-diff-setup.ts | 177 - .../rpc/examples/js/src/geth-diff.test.ts | 315 - .../frame/revive/rpc/examples/js/src/lib.ts | 128 - .../frame/revive/rpc/examples/js/src/main.ts | 141 + .../revive/rpc/examples/js/src/piggy-bank.ts | 69 - .../revive/rpc/examples/js/src/script.ts | 49 + .../revive/rpc/examples/js/src/solc.d.ts | 83 - .../revive/rpc/examples/js/src/transfer.ts | 18 - .../frame/revive/rpc/examples/js/src/web.ts | 129 - .../revive/rpc/examples/js/tsconfig.json | 38 +- .../frame/revive/rpc/examples/rust/deploy.rs | 20 +- .../revive/rpc/examples/rust/transfer.rs | 19 +- 
.../frame/revive/rpc/revive_chain.metadata | Bin 659977 -> 655430 bytes substrate/frame/revive/rpc/src/client.rs | 282 +- substrate/frame/revive/rpc/src/example.rs | 196 +- substrate/frame/revive/rpc/src/lib.rs | 120 +- .../frame/revive/rpc/src/rpc_methods_gen.rs | 1 - .../frame/revive/rpc/src/subxt_client.rs | 21 +- substrate/frame/revive/rpc/src/tests.rs | 278 +- .../revive/src/benchmarking/call_builder.rs | 14 +- .../frame/revive/src/benchmarking/mod.rs | 183 +- substrate/frame/revive/src/chain_extension.rs | 12 +- substrate/frame/revive/src/evm/api/account.rs | 30 +- .../frame/revive/src/evm/api/rlp_codec.rs | 487 +- .../frame/revive/src/evm/api/rpc_types.rs | 262 +- .../frame/revive/src/evm/api/rpc_types_gen.rs | 47 +- .../frame/revive/src/evm/api/signature.rs | 190 +- substrate/frame/revive/src/evm/api/type_id.rs | 22 +- substrate/frame/revive/src/evm/runtime.rs | 428 +- substrate/frame/revive/src/exec.rs | 426 +- substrate/frame/revive/src/lib.rs | 438 +- substrate/frame/revive/src/limits.rs | 48 +- substrate/frame/revive/src/primitives.rs | 45 +- substrate/frame/revive/src/storage/meter.rs | 52 +- .../frame/revive/src/test_utils/builder.rs | 11 +- substrate/frame/revive/src/tests.rs | 477 +- .../frame/revive/src/tests/test_debug.rs | 5 +- substrate/frame/revive/src/wasm/mod.rs | 75 +- substrate/frame/revive/src/wasm/runtime.rs | 989 +- substrate/frame/revive/src/weights.rs | 1142 +- substrate/frame/revive/uapi/Cargo.toml | 13 +- substrate/frame/revive/uapi/src/flags.rs | 2 +- substrate/frame/revive/uapi/src/host.rs | 692 +- .../uapi/src/host/{riscv64.rs => riscv32.rs} | 467 +- substrate/frame/revive/uapi/src/lib.rs | 25 +- substrate/frame/root-offences/Cargo.toml | 2 +- substrate/frame/root-offences/src/lib.rs | 2 +- substrate/frame/root-offences/src/mock.rs | 2 +- substrate/frame/root-testing/Cargo.toml | 2 +- substrate/frame/safe-mode/Cargo.toml | 14 +- substrate/frame/safe-mode/src/mock.rs | 1 - substrate/frame/safe-mode/src/weights.rs | 124 +- 
substrate/frame/salary/Cargo.toml | 6 +- .../frame/salary/src/tests/integration.rs | 52 +- substrate/frame/salary/src/weights.rs | 64 +- substrate/frame/sassafras/Cargo.toml | 2 +- substrate/frame/scheduler/Cargo.toml | 6 +- substrate/frame/scheduler/src/benchmarking.rs | 306 +- substrate/frame/scheduler/src/weights.rs | 210 +- substrate/frame/scored-pool/Cargo.toml | 2 +- substrate/frame/session/Cargo.toml | 8 +- .../frame/session/benchmarking/Cargo.toml | 4 +- .../frame/session/benchmarking/src/inner.rs | 68 +- .../frame/session/benchmarking/src/mock.rs | 6 +- substrate/frame/session/src/lib.rs | 21 +- substrate/frame/session/src/weights.rs | 36 +- substrate/frame/society/Cargo.toml | 8 +- substrate/frame/society/src/benchmarking.rs | 329 +- substrate/frame/society/src/lib.rs | 29 +- substrate/frame/society/src/weights.rs | 172 +- substrate/frame/src/lib.rs | 25 +- substrate/frame/staking/CHANGELOG.md | 12 - substrate/frame/staking/Cargo.toml | 30 +- substrate/frame/staking/src/benchmarking.rs | 22 +- substrate/frame/staking/src/lib.rs | 179 +- substrate/frame/staking/src/migrations.rs | 76 - substrate/frame/staking/src/mock.rs | 24 +- substrate/frame/staking/src/pallet/impls.rs | 70 +- substrate/frame/staking/src/pallet/mod.rs | 194 +- substrate/frame/staking/src/slashing.rs | 51 +- substrate/frame/staking/src/testing_utils.rs | 2 +- substrate/frame/staking/src/tests.rs | 922 +- substrate/frame/staking/src/weights.rs | 2 +- .../frame/state-trie-migration/Cargo.toml | 14 +- .../frame/state-trie-migration/src/lib.rs | 176 +- .../frame/state-trie-migration/src/weights.rs | 116 +- substrate/frame/statement/Cargo.toml | 10 +- substrate/frame/sudo/Cargo.toml | 2 +- substrate/frame/sudo/src/benchmarking.rs | 2 +- substrate/frame/sudo/src/extension.rs | 3 +- substrate/frame/sudo/src/weights.rs | 64 +- substrate/frame/support/Cargo.toml | 63 +- substrate/frame/support/procedural/Cargo.toml | 22 +- .../src/construct_runtime/expand/metadata.rs | 4 +- 
.../procedural/src/construct_runtime/mod.rs | 3 +- .../procedural/src/runtime/parse/pallet.rs | 15 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- substrate/frame/support/src/dispatch.rs | 12 +- .../support/src/generate_genesis_config.rs | 560 +- substrate/frame/support/src/lib.rs | 12 +- substrate/frame/support/src/traits.rs | 16 +- substrate/frame/support/src/traits/misc.rs | 4 +- substrate/frame/support/src/traits/voting.rs | 48 +- .../support/src/weights/block_weights.rs | 22 +- .../support/src/weights/extrinsic_weights.rs | 22 +- substrate/frame/support/test/Cargo.toml | 24 +- .../support/test/compile_pass/Cargo.toml | 2 +- .../frame/support/test/pallet/Cargo.toml | 4 +- substrate/frame/support/test/tests/pallet.rs | 67 +- .../support/test/tests/runtime_metadata.rs | 49 +- substrate/frame/system/Cargo.toml | 4 +- .../frame/system/benchmarking/Cargo.toml | 4 +- .../system/benchmarking/src/extensions.rs | 26 +- .../frame/system/rpc/runtime-api/Cargo.toml | 2 +- .../system/src/extensions/check_mortality.rs | 11 +- .../src/extensions/check_non_zero_sender.rs | 15 +- .../system/src/extensions/check_nonce.rs | 51 +- .../system/src/extensions/check_weight.rs | 21 +- .../frame/system/src/extensions/weights.rs | 96 +- substrate/frame/system/src/lib.rs | 3 +- substrate/frame/system/src/weights.rs | 150 +- substrate/frame/timestamp/Cargo.toml | 4 +- substrate/frame/timestamp/src/lib.rs | 2 +- substrate/frame/timestamp/src/weights.rs | 20 +- substrate/frame/tips/Cargo.toml | 6 +- substrate/frame/tips/src/weights.rs | 128 +- .../frame/transaction-payment/Cargo.toml | 6 +- .../asset-conversion-tx-payment/Cargo.toml | 8 +- .../src/benchmarking.rs | 6 +- .../asset-conversion-tx-payment/src/lib.rs | 2 - .../asset-conversion-tx-payment/src/tests.rs | 57 +- .../src/weights.rs | 78 +- .../asset-tx-payment/Cargo.toml | 2 +- .../asset-tx-payment/src/benchmarking.rs | 6 +- .../asset-tx-payment/src/lib.rs | 3 +- .../asset-tx-payment/src/tests.rs | 29 +- 
.../skip-feeless-payment/src/lib.rs | 18 +- .../skip-feeless-payment/src/mock.rs | 1 - .../skip-feeless-payment/src/tests.rs | 30 +- .../transaction-payment/src/benchmarking.rs | 2 +- .../frame/transaction-payment/src/lib.rs | 3 - .../frame/transaction-payment/src/payment.rs | 17 +- .../frame/transaction-payment/src/tests.rs | 116 +- .../frame/transaction-payment/src/weights.rs | 42 +- .../frame/transaction-storage/Cargo.toml | 6 +- .../transaction-storage/src/benchmarking.rs | 51 +- .../frame/transaction-storage/src/weights.rs | 44 +- substrate/frame/treasury/Cargo.toml | 12 +- substrate/frame/treasury/src/benchmarking.rs | 2 +- substrate/frame/treasury/src/lib.rs | 40 +- substrate/frame/treasury/src/weights.rs | 166 +- substrate/frame/tx-pause/Cargo.toml | 12 +- substrate/frame/tx-pause/src/mock.rs | 1 - substrate/frame/tx-pause/src/weights.rs | 20 +- substrate/frame/uniques/Cargo.toml | 4 +- substrate/frame/uniques/src/weights.rs | 348 +- substrate/frame/utility/Cargo.toml | 4 +- substrate/frame/utility/src/weights.rs | 68 +- substrate/frame/verify-signature/Cargo.toml | 4 +- .../verify-signature/src/benchmarking.rs | 24 +- .../frame/verify-signature/src/extension.rs | 3 +- substrate/frame/verify-signature/src/tests.rs | 30 +- .../frame/verify-signature/src/weights.rs | 25 +- substrate/frame/vesting/Cargo.toml | 4 +- substrate/frame/vesting/src/weights.rs | 280 +- substrate/frame/whitelist/Cargo.toml | 2 +- substrate/frame/whitelist/src/weights.rs | 68 +- substrate/primitives/api/Cargo.toml | 18 +- .../primitives/api/proc-macro/Cargo.toml | 10 +- .../api/proc-macro/src/runtime_metadata.rs | 6 +- substrate/primitives/api/test/Cargo.toml | 19 +- .../api/test/tests/decl_and_impl.rs | 2 - .../api/test/tests/runtime_calls.rs | 4 +- .../primitives/application-crypto/Cargo.toml | 2 +- substrate/primitives/arithmetic/Cargo.toml | 4 +- substrate/primitives/blockchain/Cargo.toml | 4 +- .../primitives/consensus/beefy/Cargo.toml | 2 +- 
.../primitives/consensus/common/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 40 +- substrate/primitives/core/src/lib.rs | 5 +- .../primitives/crypto/ec-utils/Cargo.toml | 14 +- .../crypto/hashing/proc-macro/Cargo.toml | 2 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- .../primitives/genesis-builder/Cargo.toml | 2 +- substrate/primitives/inherents/Cargo.toml | 4 +- substrate/primitives/io/Cargo.toml | 16 +- substrate/primitives/keyring/Cargo.toml | 2 +- substrate/primitives/keyring/src/lib.rs | 9 + .../merkle-mountain-range/Cargo.toml | 2 +- substrate/primitives/metadata-ir/Cargo.toml | 2 +- substrate/primitives/metadata-ir/src/lib.rs | 35 +- substrate/primitives/metadata-ir/src/types.rs | 6 +- .../primitives/metadata-ir/src/unstable.rs | 211 - substrate/primitives/metadata-ir/src/v14.rs | 5 +- substrate/primitives/metadata-ir/src/v15.rs | 2 +- substrate/primitives/panic-handler/src/lib.rs | 4 +- .../primitives/runtime-interface/Cargo.toml | 20 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../runtime-interface/test/Cargo.toml | 4 +- substrate/primitives/runtime/Cargo.toml | 6 +- .../runtime/src/generic/checked_extrinsic.rs | 39 +- .../primitives/runtime/src/generic/digest.rs | 55 +- .../primitives/runtime/src/generic/mod.rs | 4 +- .../src/generic/unchecked_extrinsic.rs | 41 +- .../primitives/runtime/src/traits/mod.rs | 17 +- .../as_transaction_extension.rs | 3 +- .../dispatch_transaction.rs | 41 +- .../src/traits/transaction_extension/mod.rs | 266 +- .../runtime/src/type_with_default.rs | 146 +- substrate/primitives/session/Cargo.toml | 2 +- substrate/primitives/staking/Cargo.toml | 4 +- substrate/primitives/staking/src/offence.rs | 25 - substrate/primitives/state-machine/Cargo.toml | 10 +- .../primitives/state-machine/fuzz/Cargo.toml | 2 +- .../state-machine/src/trie_backend.rs | 20 +- .../primitives/statement-store/Cargo.toml | 14 +- substrate/primitives/timestamp/Cargo.toml | 2 +- substrate/primitives/trie/Cargo.toml | 8 +- 
substrate/primitives/trie/src/node_codec.rs | 8 - substrate/primitives/trie/src/recorder.rs | 5 +- .../primitives/trie/src/storage_proof.rs | 10 +- substrate/primitives/version/Cargo.toml | 2 +- .../primitives/wasm-interface/Cargo.toml | 2 +- substrate/primitives/weights/Cargo.toml | 2 +- .../ci/node-template-release/Cargo.toml | 2 +- substrate/test-utils/Cargo.toml | 2 +- substrate/test-utils/cli/Cargo.toml | 12 +- substrate/test-utils/client/Cargo.toml | 8 +- substrate/test-utils/client/src/lib.rs | 4 +- substrate/test-utils/runtime/Cargo.toml | 52 +- .../test-utils/runtime/client/src/lib.rs | 2 +- .../runtime/client/src/trait_tests.rs | 38 +- substrate/test-utils/runtime/src/extrinsic.rs | 10 +- .../test-utils/runtime/src/genesismap.rs | 10 +- substrate/test-utils/runtime/src/lib.rs | 39 +- .../runtime/transaction-pool/Cargo.toml | 2 +- .../runtime/transaction-pool/src/lib.rs | 6 +- substrate/utils/binary-merkle-tree/Cargo.toml | 6 +- .../utils/frame/benchmarking-cli/Cargo.toml | 39 +- .../utils/frame/benchmarking-cli/src/lib.rs | 1 + .../benchmarking-cli/src/overhead/command.rs | 28 +- .../benchmarking-cli/src/overhead/mod.rs | 1 + .../src/overhead/remark_builder.rs | 8 +- .../src/overhead/runtime_utilities.rs} | 95 +- .../benchmarking-cli/src/pallet/command.rs | 92 +- .../utils/frame/generate-bags/Cargo.toml | 2 +- .../generate-bags/node-runtime/Cargo.toml | 2 +- substrate/utils/frame/omni-bencher/Cargo.toml | 8 +- .../utils/frame/omni-bencher/src/main.rs | 2 + .../frame/remote-externalities/Cargo.toml | 12 +- .../frame/remote-externalities/src/lib.rs | 219 +- .../frame/remote-externalities/src/logging.rs | 86 - substrate/utils/frame/rpc/client/Cargo.toml | 6 +- substrate/utils/frame/rpc/support/Cargo.toml | 10 +- substrate/utils/frame/rpc/system/Cargo.toml | 8 +- substrate/utils/frame/rpc/system/src/lib.rs | 16 +- substrate/utils/prometheus/Cargo.toml | 2 +- substrate/utils/prometheus/src/lib.rs | 4 +- substrate/utils/wasm-builder/Cargo.toml | 20 +- 
substrate/utils/wasm-builder/src/builder.rs | 3 +- substrate/utils/wasm-builder/src/lib.rs | 105 +- .../utils/wasm-builder/src/prerequisites.rs | 7 +- .../utils/wasm-builder/src/wasm_project.rs | 28 +- templates/minimal/README.md | 9 +- templates/minimal/node/Cargo.toml | 4 +- templates/minimal/node/src/service.rs | 3 +- templates/minimal/pallets/template/Cargo.toml | 2 +- templates/minimal/runtime/Cargo.toml | 2 +- templates/minimal/runtime/src/lib.rs | 13 +- templates/minimal/zombienet-omni-node.toml | 2 +- templates/parachain/README.md | 40 +- templates/parachain/node/Cargo.toml | 12 +- templates/parachain/node/src/chain_spec.rs | 15 +- templates/parachain/node/src/service.rs | 4 +- templates/parachain/runtime/Cargo.toml | 6 +- .../runtime/src/genesis_config_presets.rs | 21 +- templates/parachain/runtime/src/lib.rs | 1 - templates/solochain/node/Cargo.toml | 30 +- templates/solochain/node/src/service.rs | 3 +- templates/solochain/runtime/Cargo.toml | 8 +- .../runtime/src/genesis_config_presets.rs | 27 +- templates/zombienet/Cargo.toml | 2 +- umbrella/Cargo.toml | 771 +- umbrella/src/lib.rs | 8 +- 1647 files changed, 35868 insertions(+), 64462 deletions(-) delete mode 100644 .github/actions/workflow-stopper/action.yml delete mode 100755 .github/scripts/check-missing-readme-generation.sh delete mode 100755 .github/scripts/release/build-macos-release.sh delete mode 100644 .github/scripts/release/distributions delete mode 100644 .github/workflows/benchmarks-networking.yml delete mode 100644 .github/workflows/publish-check-compile.yml rename .github/workflows/{release-11_rc-automation.yml => release-10_rc-automation.yml} (100%) delete mode 100644 .github/workflows/release-20_build-rc.yml delete mode 100644 .github/workflows/release-31_promote-rc-to-final.yml delete mode 100644 .github/workflows/release-40_publish-deb-package.yml rename .github/workflows/{release-10_branchoff-stable.yml => release-branchoff-stable.yml} (100%) create mode 100644 
.github/workflows/release-build-rc.yml delete mode 100644 .github/workflows/release-reusable-promote-to-final.yml rename .github/workflows/{benchmarks-subsystem.yml => subsystem-benchmarks.yml} (100%) delete mode 100644 cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs delete mode 100644 cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs create mode 100644 cumulus/docs/release.md delete mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs create mode 100644 polkadot/node/core/backing/src/tests/prospective_parachains.rs delete mode 100644 polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs create mode 100644 polkadot/runtime/common/src/auctions.rs delete mode 100644 polkadot/runtime/common/src/auctions/benchmarking.rs delete mode 100644 polkadot/runtime/common/src/auctions/mock.rs delete mode 100644 polkadot/runtime/common/src/auctions/mod.rs delete mode 100644 polkadot/runtime/common/src/auctions/tests.rs create mode 100644 polkadot/runtime/common/src/claims.rs delete mode 100644 polkadot/runtime/common/src/claims/benchmarking.rs delete mode 100644 polkadot/runtime/common/src/claims/mock.rs delete mode 100644 polkadot/runtime/common/src/claims/mod.rs delete mode 100644 polkadot/runtime/common/src/claims/tests.rs delete mode 100644 polkadot/runtime/common/src/paras_registrar/benchmarking.rs delete mode 100644 polkadot/runtime/common/src/paras_registrar/mock.rs delete mode 100644 polkadot/runtime/common/src/paras_registrar/tests.rs create mode 100644 polkadot/runtime/common/src/purchase.rs delete mode 100644 polkadot/runtime/common/src/purchase/mock.rs delete mode 100644 polkadot/runtime/common/src/purchase/mod.rs delete mode 100644 polkadot/runtime/common/src/purchase/tests.rs rename polkadot/xcm/procedural/{src/enum_variants.rs => tests/ui/builder_pattern/loads_holding_no_operands.rs} (51%) create mode 100644 
polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr delete mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr rename polkadot/xcm/procedural/tests/{enum_variants.rs => ui/builder_pattern/unpaid_execution_named_fields.rs} (70%) create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr delete mode 100644 polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs delete mode 100644 polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs delete mode 100644 polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs delete mode 100644 polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs delete mode 100644 polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml delete mode 100644 polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl delete mode 100644 polkadot/zombienet_tests/functional/0019-verify-included-events.js rename prdoc/{stable2412 => }/pr_3151.prdoc (100%) rename prdoc/{stable2412 => }/pr_3685.prdoc (100%) rename prdoc/{stable2412 => }/pr_3881.prdoc (100%) rename prdoc/{stable2412 => }/pr_3970.prdoc (100%) rename prdoc/{stable2412 => }/pr_4012.prdoc (100%) rename prdoc/{stable2412 => }/pr_4251.prdoc (100%) rename prdoc/{stable2412 => }/pr_4257.prdoc (100%) delete mode 100644 prdoc/pr_4273.prdoc rename prdoc/{stable2412 => }/pr_4639.prdoc (100%) rename prdoc/{stable2412 => }/pr_4826.prdoc (100%) rename prdoc/{stable2412 => }/pr_4837.prdoc (100%) rename prdoc/{stable2412 => }/pr_4846.prdoc (100%) rename prdoc/{stable2412 => }/pr_4849.prdoc (100%) rename prdoc/{stable2412 => }/pr_4851.prdoc (100%) delete mode 100644 prdoc/pr_4880.prdoc rename prdoc/{stable2412 => }/pr_4889.prdoc (100%) rename prdoc/{stable2412 => }/pr_4974.prdoc (100%) rename prdoc/{stable2412 => }/pr_4982.prdoc (100%) rename prdoc/{stable2412 => }/pr_5038.prdoc (100%) rename prdoc/{stable2412 => 
}/pr_5194.prdoc (100%) rename prdoc/{stable2412 => }/pr_5198.prdoc (100%) rename prdoc/{stable2412 => }/pr_5201.prdoc (100%) rename prdoc/{stable2412 => }/pr_5274.prdoc (100%) rename prdoc/{stable2412 => }/pr_5322.prdoc (100%) rename prdoc/{stable2412 => }/pr_5343.prdoc (100%) delete mode 100644 prdoc/pr_5363.prdoc rename prdoc/{stable2412 => }/pr_5372.prdoc (100%) rename prdoc/{stable2412 => }/pr_5390.prdoc (100%) rename prdoc/{stable2412 => }/pr_5420.prdoc (100%) rename prdoc/{stable2412 => }/pr_5423.prdoc (100%) rename prdoc/{stable2412 => }/pr_5435.prdoc (100%) rename prdoc/{stable2412 => }/pr_5461.prdoc (100%) rename prdoc/{stable2412 => }/pr_5469.prdoc (100%) rename prdoc/{stable2412 => }/pr_5502.prdoc (100%) rename prdoc/{stable2412 => }/pr_5515.prdoc (100%) rename prdoc/{stable2412 => }/pr_5521.prdoc (100%) rename prdoc/{stable2412 => }/pr_5526.prdoc (100%) rename prdoc/{stable2412 => }/pr_5540.prdoc (100%) rename prdoc/{stable2412 => }/pr_5548.prdoc (100%) rename prdoc/{stable2412 => }/pr_5554.prdoc (100%) rename prdoc/{stable2412 => }/pr_5555.prdoc (100%) rename prdoc/{stable2412 => }/pr_5556.prdoc (100%) rename prdoc/{stable2412 => }/pr_5572.prdoc (100%) rename prdoc/{stable2412 => }/pr_5585.prdoc (100%) rename prdoc/{stable2412 => }/pr_5592.prdoc (100%) rename prdoc/{stable2412 => }/pr_5601.prdoc (100%) rename prdoc/{stable2412 => }/pr_5606.prdoc (100%) rename prdoc/{stable2412 => }/pr_5608.prdoc (100%) rename prdoc/{stable2412 => }/pr_5609.prdoc (100%) rename prdoc/{stable2412 => }/pr_5616.prdoc (100%) rename prdoc/{stable2412 => }/pr_5623.prdoc (100%) rename prdoc/{stable2412 => }/pr_5630.prdoc (100%) rename prdoc/{stable2412 => }/pr_5635.prdoc (100%) rename prdoc/{stable2412 => }/pr_5640.prdoc (100%) delete mode 100644 prdoc/pr_5656.prdoc rename prdoc/{stable2412 => }/pr_5664.prdoc (100%) rename prdoc/{stable2412 => }/pr_5665.prdoc (100%) rename prdoc/{stable2412 => }/pr_5666.prdoc (100%) rename prdoc/{stable2412 => }/pr_5675.prdoc (100%) rename 
prdoc/{stable2412 => }/pr_5676.prdoc (100%) rename prdoc/{stable2412 => }/pr_5679.prdoc (100%) rename prdoc/{stable2412 => }/pr_5682.prdoc (100%) rename prdoc/{stable2412 => }/pr_5684.prdoc (100%) rename prdoc/{stable2412 => }/pr_5686.prdoc (100%) rename prdoc/{stable2412 => }/pr_5687.prdoc (100%) rename prdoc/{stable2412 => }/pr_5693.prdoc (100%) rename prdoc/{stable2412 => }/pr_5701.prdoc (100%) delete mode 100644 prdoc/pr_5703.prdoc rename prdoc/{stable2412 => }/pr_5707.prdoc (100%) rename prdoc/{stable2412 => }/pr_5716.prdoc (100%) delete mode 100644 prdoc/pr_5723.prdoc delete mode 100644 prdoc/pr_5724.prdoc rename prdoc/{stable2412 => }/pr_5726.prdoc (100%) rename prdoc/{stable2412 => }/pr_5737.prdoc (100%) rename prdoc/{stable2412 => }/pr_5741.prdoc (100%) rename prdoc/{stable2412 => }/pr_5743.prdoc (100%) rename prdoc/{stable2412 => }/pr_5745.prdoc (100%) rename prdoc/{stable2412 => }/pr_5756.prdoc (100%) rename prdoc/{stable2412 => }/pr_5762.prdoc (100%) rename prdoc/{stable2412 => }/pr_5765.prdoc (100%) rename prdoc/{stable2412 => }/pr_5768.prdoc (100%) rename prdoc/{stable2412 => }/pr_5774.prdoc (100%) rename prdoc/{stable2412 => }/pr_5779.prdoc (100%) rename prdoc/{stable2412 => }/pr_5787.prdoc (100%) rename prdoc/{stable2412 => }/pr_5789.prdoc (100%) rename prdoc/{stable2412 => }/pr_5796.prdoc (100%) rename prdoc/{stable2412 => }/pr_5804.prdoc (100%) rename prdoc/{stable2412 => }/pr_5807.prdoc (100%) rename prdoc/{stable2412 => }/pr_5811.prdoc (100%) rename prdoc/{stable2412 => }/pr_5813.prdoc (100%) rename prdoc/{stable2412 => }/pr_5824.prdoc (100%) rename prdoc/{stable2412 => }/pr_5830.prdoc (100%) rename prdoc/{stable2412 => }/pr_5838.prdoc (100%) rename prdoc/{stable2412 => }/pr_5839.prdoc (100%) delete mode 100644 prdoc/pr_5842.prdoc rename prdoc/{stable2412 => }/pr_5845.prdoc (100%) rename prdoc/{stable2412 => }/pr_5847.prdoc (100%) delete mode 100644 prdoc/pr_5855.prdoc rename prdoc/{stable2412 => }/pr_5856.prdoc (100%) rename prdoc/{stable2412 
=> }/pr_5857.prdoc (100%) rename prdoc/{stable2412 => }/pr_5859.prdoc (100%) rename prdoc/{stable2412 => }/pr_5861.prdoc (100%) rename prdoc/{stable2412 => }/pr_5866.prdoc (100%) rename prdoc/{stable2412 => }/pr_5872.prdoc (100%) rename prdoc/{stable2412 => }/pr_5875.prdoc (100%) rename prdoc/{stable2412 => }/pr_5876.prdoc (100%) rename prdoc/{stable2412 => }/pr_5880.prdoc (100%) rename prdoc/{stable2412 => }/pr_5883.prdoc (100%) rename prdoc/{stable2412 => }/pr_5886.prdoc (100%) rename prdoc/{stable2412 => }/pr_5888.prdoc (100%) rename prdoc/{stable2412 => }/pr_5891.prdoc (100%) rename prdoc/{stable2412 => }/pr_5892.prdoc (100%) delete mode 100644 prdoc/pr_5899.prdoc rename prdoc/{stable2412 => }/pr_5901.prdoc (100%) rename prdoc/{stable2412 => }/pr_5908.prdoc (100%) rename prdoc/{stable2412 => }/pr_5911.prdoc (100%) rename prdoc/{stable2412 => }/pr_5915.prdoc (100%) rename prdoc/{stable2412 => }/pr_5917.prdoc (100%) rename prdoc/{stable2412 => }/pr_5919.prdoc (100%) rename prdoc/{stable2412 => }/pr_5924.prdoc (100%) rename prdoc/{stable2412 => }/pr_5939.prdoc (100%) rename prdoc/{stable2412 => }/pr_5941.prdoc (100%) rename prdoc/{stable2412 => }/pr_5946.prdoc (100%) rename prdoc/{stable2412 => }/pr_5954.prdoc (100%) rename prdoc/{stable2412 => }/pr_5961.prdoc (100%) rename prdoc/{stable2412 => }/pr_5971.prdoc (100%) rename prdoc/{stable2412 => }/pr_5984.prdoc (100%) rename prdoc/{stable2412 => }/pr_5994.prdoc (100%) rename prdoc/{stable2412 => }/pr_5995.prdoc (100%) rename prdoc/{stable2412 => }/pr_5998.prdoc (100%) rename prdoc/{stable2412 => }/pr_5999.prdoc (100%) rename prdoc/{stable2412 => }/pr_6011.prdoc (100%) rename prdoc/{stable2412 => }/pr_6015.prdoc (100%) rename prdoc/{stable2412 => }/pr_6016.prdoc (100%) rename prdoc/{stable2412 => }/pr_6022.prdoc (100%) rename prdoc/{stable2412 => }/pr_6023.prdoc (100%) rename prdoc/{stable2412 => }/pr_6025.prdoc (100%) rename prdoc/{stable2412 => }/pr_6027.prdoc (100%) rename prdoc/{stable2412 => }/pr_6032.prdoc 
(100%) rename prdoc/{stable2412 => }/pr_6039.prdoc (100%) rename prdoc/{stable2412 => }/pr_6045.prdoc (100%) rename prdoc/{stable2412 => }/pr_6058.prdoc (100%) rename prdoc/{stable2412 => }/pr_6061.prdoc (100%) rename prdoc/{stable2412 => }/pr_6073.prdoc (100%) rename prdoc/{stable2412 => }/pr_6077.prdoc (100%) rename prdoc/{stable2412 => }/pr_6080.prdoc (100%) rename prdoc/{stable2412 => }/pr_6087.prdoc (100%) rename prdoc/{stable2412 => }/pr_6088.prdoc (100%) rename prdoc/{stable2412 => }/pr_6094.prdoc (100%) rename prdoc/{stable2412 => }/pr_6096.prdoc (100%) rename prdoc/{stable2412 => }/pr_6104.prdoc (100%) rename prdoc/{stable2412 => }/pr_6105.prdoc (100%) delete mode 100644 prdoc/pr_6111.prdoc rename prdoc/{stable2412 => }/pr_6129.prdoc (100%) rename prdoc/{stable2412 => }/pr_6141.prdoc (100%) rename prdoc/{stable2412 => }/pr_6147.prdoc (100%) rename prdoc/{stable2412 => }/pr_6148.prdoc (100%) rename prdoc/{stable2412 => }/pr_6156.prdoc (100%) rename prdoc/{stable2412 => }/pr_6169.prdoc (100%) rename prdoc/{stable2412 => }/pr_6171.prdoc (100%) rename prdoc/{stable2412 => }/pr_6174.prdoc (100%) delete mode 100644 prdoc/pr_6184.prdoc rename prdoc/{stable2412 => }/pr_6187.prdoc (100%) rename prdoc/{stable2412 => }/pr_6192.prdoc (100%) rename prdoc/{stable2412 => }/pr_6205.prdoc (100%) rename prdoc/{stable2412 => }/pr_6212.prdoc (100%) rename prdoc/{stable2412 => }/pr_6214.prdoc (100%) delete mode 100644 prdoc/pr_6215.prdoc rename prdoc/{stable2412 => }/pr_6217.prdoc (100%) rename prdoc/{stable2412 => }/pr_6218.prdoc (100%) delete mode 100644 prdoc/pr_6220.prdoc rename prdoc/{stable2412 => }/pr_6221.prdoc (100%) rename prdoc/{stable2412 => }/pr_6228.prdoc (100%) rename prdoc/{stable2412 => }/pr_6246.prdoc (100%) delete mode 100644 prdoc/pr_6248.prdoc delete mode 100644 prdoc/pr_6249.prdoc rename prdoc/{stable2412 => }/pr_6255.prdoc (100%) rename prdoc/{stable2412 => }/pr_6257.prdoc (100%) rename prdoc/{stable2412 => }/pr_6260.prdoc (100%) rename prdoc/{stable2412 
=> }/pr_6261.prdoc (100%) delete mode 100644 prdoc/pr_6262.prdoc rename prdoc/{stable2412 => }/pr_6263.prdoc (100%) rename prdoc/{stable2412 => }/pr_6264.prdoc (100%) rename prdoc/{stable2412 => }/pr_6268.prdoc (100%) rename prdoc/{stable2412 => }/pr_6278.prdoc (100%) delete mode 100644 prdoc/pr_6284.prdoc rename prdoc/{stable2412 => }/pr_6288.prdoc (100%) delete mode 100644 prdoc/pr_6290.prdoc rename prdoc/{stable2412 => }/pr_6291.prdoc (100%) rename prdoc/{stable2412 => }/pr_6295.prdoc (100%) rename prdoc/{stable2412 => }/pr_6296.prdoc (100%) rename prdoc/{stable2412 => }/pr_6298.prdoc (100%) rename prdoc/{stable2412 => }/pr_6299.prdoc (100%) delete mode 100644 prdoc/pr_6301.prdoc delete mode 100644 prdoc/pr_6302.prdoc rename prdoc/{stable2412 => }/pr_6305.prdoc (100%) delete mode 100644 prdoc/pr_6310.prdoc delete mode 100644 prdoc/pr_6311.prdoc rename prdoc/{stable2412 => }/pr_6314.prdoc (100%) rename prdoc/{stable2412 => }/pr_6315.prdoc (100%) rename prdoc/{stable2412 => }/pr_6316.prdoc (100%) rename prdoc/{stable2412 => }/pr_6317.prdoc (100%) rename prdoc/{stable2412 => }/pr_6318.prdoc (100%) rename prdoc/{stable2412 => }/pr_6337.prdoc (100%) delete mode 100644 prdoc/pr_6349.prdoc rename prdoc/{stable2412 => }/pr_6353.prdoc (100%) rename prdoc/{stable2412 => }/pr_6357.prdoc (100%) rename prdoc/{stable2412 => }/pr_6360.prdoc (100%) rename prdoc/{stable2412 => }/pr_6365.prdoc (100%) delete mode 100644 prdoc/pr_6367.prdoc delete mode 100644 prdoc/pr_6368.prdoc rename prdoc/{stable2412 => }/pr_6373.prdoc (100%) rename prdoc/{stable2412 => }/pr_6380.prdoc (100%) rename prdoc/{stable2412 => }/pr_6382.prdoc (100%) rename prdoc/{stable2412 => }/pr_6384.prdoc (100%) delete mode 100644 prdoc/pr_6393.prdoc delete mode 100644 prdoc/pr_6400.prdoc delete mode 100644 prdoc/pr_6405.prdoc rename prdoc/{stable2412 => }/pr_6406.prdoc (100%) delete mode 100644 prdoc/pr_6411.prdoc delete mode 100644 prdoc/pr_6417.prdoc delete mode 100644 prdoc/pr_6419.prdoc delete mode 100644 
prdoc/pr_6425.prdoc delete mode 100644 prdoc/pr_6435.prdoc delete mode 100644 prdoc/pr_6439.prdoc delete mode 100644 prdoc/pr_6440.prdoc delete mode 100644 prdoc/pr_6446.prdoc delete mode 100644 prdoc/pr_6450.prdoc delete mode 100644 prdoc/pr_6452.prdoc delete mode 100644 prdoc/pr_6453.prdoc delete mode 100644 prdoc/pr_6455.prdoc delete mode 100644 prdoc/pr_6459.prdoc delete mode 100644 prdoc/pr_6460.prdoc delete mode 100644 prdoc/pr_6461.prdoc delete mode 100644 prdoc/pr_6463.prdoc delete mode 100644 prdoc/pr_6466.prdoc delete mode 100644 prdoc/pr_6481.prdoc delete mode 100644 prdoc/pr_6486.prdoc delete mode 100644 prdoc/pr_6502.prdoc delete mode 100644 prdoc/pr_6503.prdoc delete mode 100644 prdoc/pr_6506.prdoc delete mode 100644 prdoc/pr_6509.prdoc delete mode 100644 prdoc/pr_6521.prdoc delete mode 100644 prdoc/pr_6522.prdoc delete mode 100644 prdoc/pr_6526.prdoc delete mode 100644 prdoc/pr_6528.prdoc delete mode 100644 prdoc/pr_6533.prdoc delete mode 100644 prdoc/pr_6534.prdoc delete mode 100644 prdoc/pr_6540.prdoc delete mode 100644 prdoc/pr_6544.prdoc delete mode 100644 prdoc/pr_6546.prdoc delete mode 100644 prdoc/pr_6549.prdoc delete mode 100644 prdoc/pr_6553.prdoc delete mode 100644 prdoc/pr_6561.prdoc delete mode 100644 prdoc/pr_6562.prdoc delete mode 100644 prdoc/pr_6565.prdoc delete mode 100644 prdoc/pr_6583.prdoc delete mode 100644 prdoc/pr_6604.prdoc delete mode 100644 prdoc/pr_6605.prdoc delete mode 100644 prdoc/pr_6608.prdoc delete mode 100644 prdoc/pr_6624.prdoc delete mode 100644 prdoc/pr_6628.prdoc delete mode 100644 prdoc/pr_6636.prdoc delete mode 100644 prdoc/pr_6665.prdoc delete mode 100644 prdoc/pr_6673.prdoc delete mode 100644 prdoc/pr_6681.prdoc delete mode 100644 prdoc/pr_6695.prdoc delete mode 100644 prdoc/pr_6703.prdoc delete mode 100644 prdoc/pr_6711.prdoc delete mode 100644 prdoc/pr_6728.prdoc delete mode 100644 prdoc/pr_6741.prdoc delete mode 100644 prdoc/pr_6743.prdoc delete mode 100644 prdoc/pr_6759.prdoc delete mode 100644 
prdoc/pr_6768.prdoc delete mode 100644 prdoc/pr_6792.prdoc delete mode 100644 prdoc/pr_6796.prdoc delete mode 100644 prdoc/pr_6832.prdoc delete mode 100644 prdoc/pr_6835.prdoc delete mode 100644 prdoc/pr_6844.prdoc delete mode 100644 prdoc/pr_6857.prdoc delete mode 100644 prdoc/pr_6865.prdoc delete mode 100644 prdoc/pr_6866.prdoc delete mode 100644 prdoc/pr_6880.prdoc delete mode 100644 prdoc/pr_6889.prdoc delete mode 100644 prdoc/pr_6896.prdoc delete mode 100644 prdoc/pr_6908.prdoc delete mode 100644 prdoc/pr_6917.prdoc delete mode 100644 prdoc/pr_6920.prdoc delete mode 100644 prdoc/pr_6923.prdoc delete mode 100644 prdoc/pr_6926.prdoc delete mode 100644 prdoc/pr_6928.prdoc delete mode 100644 prdoc/pr_6937.prdoc delete mode 100644 prdoc/pr_6954.prdoc delete mode 100644 prdoc/pr_6963.prdoc delete mode 100644 prdoc/pr_6964.prdoc delete mode 100644 prdoc/pr_6979.prdoc delete mode 100644 prdoc/pr_6981.prdoc delete mode 100644 prdoc/pr_6986.prdoc delete mode 100644 prdoc/pr_6989.prdoc delete mode 100644 prdoc/pr_7005.prdoc delete mode 100644 prdoc/pr_7011.prdoc delete mode 100644 prdoc/pr_7013.prdoc delete mode 100644 prdoc/pr_7020.prdoc delete mode 100644 prdoc/pr_7021.prdoc delete mode 100644 prdoc/pr_7028.prdoc delete mode 100644 prdoc/stable2412/pr_4834.prdoc delete mode 100644 prdoc/stable2412/pr_5311.prdoc delete mode 100644 prdoc/stable2412/pr_5732.prdoc delete mode 100644 prdoc/stable2412/pr_5997.prdoc delete mode 100644 prdoc/stable2412/pr_6304.prdoc delete mode 100644 prdoc/stable2412/pr_6323.prdoc delete mode 100644 prdoc/stable2412/pr_6418.prdoc delete mode 100644 prdoc/stable2412/pr_6454.prdoc delete mode 100644 prdoc/stable2412/pr_6484.prdoc delete mode 100644 prdoc/stable2412/pr_6505.prdoc delete mode 100644 prdoc/stable2412/pr_6536.prdoc delete mode 100644 prdoc/stable2412/pr_6566.prdoc delete mode 100644 prdoc/stable2412/pr_6588.prdoc delete mode 100644 prdoc/stable2412/pr_6603.prdoc delete mode 100644 prdoc/stable2412/pr_6643.prdoc delete mode 100644 
prdoc/stable2412/pr_6645.prdoc delete mode 100644 prdoc/stable2412/pr_6646.prdoc delete mode 100644 prdoc/stable2412/pr_6652.prdoc delete mode 100644 prdoc/stable2412/pr_6677.prdoc delete mode 100644 prdoc/stable2412/pr_6690.prdoc delete mode 100644 prdoc/stable2412/pr_6696.prdoc delete mode 100644 prdoc/stable2412/pr_6729.prdoc delete mode 100644 prdoc/stable2412/pr_6742.prdoc delete mode 100644 prdoc/stable2412/pr_6760.prdoc delete mode 100644 prdoc/stable2412/pr_6781.prdoc delete mode 100644 prdoc/stable2412/pr_6814.prdoc delete mode 100644 prdoc/stable2412/pr_6860.prdoc delete mode 100644 prdoc/stable2412/pr_6863.prdoc delete mode 100644 prdoc/stable2412/pr_6864.prdoc delete mode 100644 prdoc/stable2412/pr_6885.prdoc create mode 100644 substrate/.config/nextest.toml delete mode 100644 substrate/client/network/types/src/kad.rs delete mode 100644 substrate/client/runtime-utilities/Cargo.toml delete mode 100644 substrate/client/runtime-utilities/src/error.rs delete mode 100644 substrate/client/transaction-pool/tests/fatp_prios.rs rename substrate/frame/revive/fixtures/build/{_Cargo.toml => Cargo.toml} (62%) delete mode 100644 substrate/frame/revive/fixtures/build/_rust-toolchain.toml delete mode 100644 substrate/frame/revive/fixtures/contracts/base_fee.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/call_data_copy.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/call_data_load.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/call_data_size.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/gas_limit.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/gas_price.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/ref_time_left.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/unknown_syscall.rs delete mode 100644 substrate/frame/revive/fixtures/contracts/unstable_interface.rs 
create mode 100644 substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json delete mode 100755 substrate/frame/revive/rpc/examples/bun.lockb delete mode 100644 substrate/frame/revive/rpc/examples/js/.prettierrc.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/Errors.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/Errors.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/EventExample.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/EventExample.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/Flipper.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/Flipper.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/contracts/.solhint.json delete mode 100644 substrate/frame/revive/rpc/examples/js/contracts/Errors.sol delete mode 100644 substrate/frame/revive/rpc/examples/js/contracts/Event.sol delete mode 100644 substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol delete mode 100644 substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol delete mode 100644 substrate/frame/revive/rpc/examples/js/evm/.gitkeep delete mode 100644 substrate/frame/revive/rpc/examples/js/package-lock.json delete mode 100644 substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm delete mode 100644 substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm delete mode 100644 substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm delete mode 100644 substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm delete mode 100644 
substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm delete mode 100644 substrate/frame/revive/rpc/examples/js/src/balance.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/build-contracts.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/event.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/lib.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/main.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/script.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/solc.d.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/transfer.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/web.ts rename substrate/frame/revive/uapi/src/host/{riscv64.rs => riscv32.rs} (72%) delete mode 100644 substrate/primitives/metadata-ir/src/unstable.rs rename substrate/{client/runtime-utilities/src/lib.rs => utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs} (56%) delete mode 100644 substrate/utils/frame/remote-externalities/src/logging.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index 68a0d7b552dc..1b8ffe1a1c82 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -9,7 +9,3 @@ rustdocflags = [ CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true } CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true } CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true } - -[net] -retry = 5 -# git-fetch-with-cli = true # commented because there is a risk that a runner can be banned by github diff --git a/.config/lychee.toml b/.config/lychee.toml index 58f8d068d9d1..b1f08de33340 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -28,7 
+28,7 @@ exclude = [ "http://visitme/", "https://visitme/", - # TODO meta issue: + # TODO "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", "https://github.com/ipfs/js-ipfs-bitswap/blob/", @@ -50,10 +50,8 @@ exclude = [ "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", # Behind a captcha (code 403): - "https://chainlist.org/chain/*", "https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/", "https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/", - # 403 rate limited: "https://etherscan.io/block/11090290", "https://subscan.io/", diff --git a/.config/nextest.toml b/.config/nextest.toml index b4bdec4aea92..1e18f8b5589c 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -21,6 +21,7 @@ retries = 5 # The number of threads to run tests with. Supported values are either an integer or # the string "num-cpus". Can be overridden through the `--test-threads` option. # test-threads = "num-cpus" + test-threads = 20 # The number of threads required for each test. This is generally used in overrides to @@ -123,10 +124,3 @@ serial-integration = { max-threads = 1 } [[profile.default.overrides]] filter = 'test(/(^ui$|_ui|ui_)/)' test-group = 'serial-integration' - -# Running eth-rpc tests sequentially -# These tests rely on a shared resource (the RPC and Node) -# and would cause race conditions due to transaction nonces if run in parallel. 
-[[profile.default.overrides]] -filter = 'package(pallet-revive-eth-rpc) and test(/^tests::/)' -test-group = 'serial-integration' diff --git a/.config/taplo.toml b/.config/taplo.toml index 4b8afc74a52e..7cbc1b075125 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -40,10 +40,3 @@ keys = ["workspace.dependencies"] [rule.formatting] reorder_keys = true - -[[rule]] -include = ["**/Cargo.toml"] -keys = ["build-dependencies", "dependencies", "dev-dependencies"] - -[rule.formatting] -reorder_keys = true diff --git a/.github/actions/workflow-stopper/action.yml b/.github/actions/workflow-stopper/action.yml deleted file mode 100644 index 0bd9382fdb30..000000000000 --- a/.github/actions/workflow-stopper/action.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: "stop all workflows" -description: "Action stops all workflows in a PR to save compute resources." -inputs: - app-id: - description: "App id" - required: true - app-key: - description: "App token" - required: true -runs: - using: "composite" - steps: - - name: Worfklow stopper - Generate token - uses: actions/create-github-app-token@v1 - id: app-token - with: - app-id: ${{ inputs.app-id }} - private-key: ${{ inputs.app-key }} - owner: "paritytech" - repositories: "workflow-stopper" - - name: Workflow stopper - Stop all workflows - uses: octokit/request-action@v2.x - with: - route: POST /repos/paritytech/workflow-stopper/actions/workflows/stopper.yml/dispatches - ref: main - inputs: '${{ format(''{{ "github_sha": "{0}", "github_repository": "{1}", "github_ref_name": "{2}", "github_workflow_id": "{3}", "github_job_name": "{4}" }}'', github.event.pull_request.head.sha, github.repository, github.ref_name, github.run_id, github.job) }}' - env: - GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} diff --git a/.github/env b/.github/env index 730c37f1db80..bb61e1f4cd99 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" 
+IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" diff --git a/.github/scripts/check-missing-readme-generation.sh b/.github/scripts/check-missing-readme-generation.sh deleted file mode 100755 index 13f2b6a7cb28..000000000000 --- a/.github/scripts/check-missing-readme-generation.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -echo "Running script relative to `pwd`" -# Find all README.docify.md files -DOCIFY_FILES=$(find . -name "README.docify.md") - -# Initialize a variable to track directories needing README regeneration -NEED_REGENERATION="" - -for file in $DOCIFY_FILES; do - echo "Processing $file" - - # Get the directory containing the docify file - DIR=$(dirname "$file") - - # Go to the directory and run cargo build - cd "$DIR" - cargo check --features generate-readme || { echo "Readme generation for $DIR failed. Ensure the crate compiles successfully and has a `generate-readme` feature which guards markdown compilation in the crate as follows: https://docs.rs/docify/latest/docify/macro.compile_markdown.html#conventions." && exit 1; } - - # Check if README.md has any uncommitted changes - git diff --exit-code README.md - - if [ $? 
-ne 0 ]; then - echo "Error: Found uncommitted changes in $DIR/README.md" - NEED_REGENERATION="$NEED_REGENERATION $DIR" - fi - - # Return to the original directory - cd - > /dev/null -done - -# Check if any directories need README regeneration -if [ -n "$NEED_REGENERATION" ]; then - echo "The following directories need README regeneration:" - echo "$NEED_REGENERATION" - exit 1 -fi \ No newline at end of file diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 2c017b7d0c3e..9da05cac17b9 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -58,7 +58,7 @@ def setup_logging(): %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean ''' -parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) +parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) for arg, config in common_args.items(): parser_bench.add_argument(arg, **config) @@ -67,35 +67,6 @@ def setup_logging(): parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') - -""" -BENCH OMNI -""" - -bench_example = '''**Examples**: - Runs all benchmarks - %(prog)s - - Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. 
**--quiet** makes it to output nothing to PR but reactions - %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet - - Runs bench for all pallets for westend runtime and fails fast on first failed benchmark - %(prog)s --runtime westend --fail-fast - - Does not output anything and cleans up the previous bot's & author command triggering comments in PR - %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean -''' - -parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) - -for arg, config in common_args.items(): - parser_bench_old.add_argument(arg, **config) - -parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) -parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) -parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') - - """ FMT """ @@ -127,12 +98,12 @@ def main(): print(f'args: {args}') - if args.command == 'bench-omni': + if args.command == 'bench': runtime_pallets_map = {} failed_benchmarks = {} successful_benchmarks = {} - profile = "production" + profile = "release" print(f'Provided runtimes: {args.runtime}') # convert to mapped dict @@ -142,22 +113,11 @@ def main(): # loop over remaining runtimes to collect available pallets for runtime in runtimesMatrix.values(): - build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}" - print(f'-- building "{runtime["name"]}" with `{build_command}`') - os.system(build_command) + os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}") print(f'-- listing pallets for benchmark for {runtime["name"]}') wasm_file = 
f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" - list_command = f"frame-omni-bencher v1 benchmark pallet " \ - f"--no-csv-header " \ - f"--no-storage-info " \ - f"--no-min-squares " \ - f"--no-median-slopes " \ - f"--all " \ - f"--list " \ - f"--runtime={wasm_file} " \ - f"{runtime['bench_flags']}" - print(f'-- running: {list_command}') - output = os.popen(list_command).read() + output = os.popen( + f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read() raw_pallets = output.strip().split('\n') all_pallets = set() @@ -270,149 +230,6 @@ def main(): print_and_log('✅ Successful benchmarks of runtimes/pallets:') for runtime, pallets in successful_benchmarks.items(): print_and_log(f'-- {runtime}: {pallets}') - - if args.command == 'bench': - runtime_pallets_map = {} - failed_benchmarks = {} - successful_benchmarks = {} - - profile = "production" - - print(f'Provided runtimes: {args.runtime}') - # convert to mapped dict - runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) - runtimesMatrix = {x['name']: x for x in runtimesMatrix} - print(f'Filtered out runtimes: {runtimesMatrix}') - - # loop over remaining runtimes to collect available pallets - for runtime in runtimesMatrix.values(): - build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked" - print(f'-- building {runtime["name"]} with `{build_command}`') - os.system(build_command) - - chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev" - - machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}" - print(f"Running machine test for `{machine_test}`") - os.system(machine_test) - - print(f'-- listing pallets for benchmark for {chain}') - list_command = 
f"target/{profile}/{runtime['old_bin']} " \ - f"benchmark pallet " \ - f"--no-csv-header " \ - f"--no-storage-info " \ - f"--no-min-squares " \ - f"--no-median-slopes " \ - f"--all " \ - f"--list " \ - f"--chain={chain}" - print(f'-- running: {list_command}') - output = os.popen(list_command).read() - raw_pallets = output.strip().split('\n') - - all_pallets = set() - for pallet in raw_pallets: - if pallet: - all_pallets.add(pallet.split(',')[0].strip()) - - pallets = list(all_pallets) - print(f'Pallets in {runtime["name"]}: {pallets}') - runtime_pallets_map[runtime['name']] = pallets - - print(f'\n') - - # filter out only the specified pallets from collected runtimes/pallets - if args.pallet: - print(f'Pallets: {args.pallet}') - new_pallets_map = {} - # keep only specified pallets if they exist in the runtime - for runtime in runtime_pallets_map: - if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): - new_pallets_map[runtime] = args.pallet - - runtime_pallets_map = new_pallets_map - - print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') - - if not runtime_pallets_map: - if args.pallet and not args.runtime: - print(f"No pallets {args.pallet} found in any runtime") - elif args.runtime and not args.pallet: - print(f"{args.runtime} runtime does not have any pallets") - elif args.runtime and args.pallet: - print(f"No pallets {args.pallet} found in {args.runtime}") - else: - print('No runtimes found') - sys.exit(1) - - for runtime in runtime_pallets_map: - for pallet in runtime_pallets_map[runtime]: - config = runtimesMatrix[runtime] - header_path = os.path.abspath(config['header']) - template = None - - chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev" - - print(f'-- config: {config}') - if runtime == 'dev': - # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) - search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == 
\"{pallet.replace('_', '-')}\") | .manifest_path'" - print(f'-- running: {search_manifest_path}') - manifest_path = os.popen(search_manifest_path).read() - if not manifest_path: - print(f'-- pallet {pallet} not found in dev runtime') - if args.fail_fast: - print_and_log(f'Error: {pallet} not found in dev runtime') - sys.exit(1) - package_dir = os.path.dirname(manifest_path) - print(f'-- package_dir: {package_dir}') - print(f'-- manifest_path: {manifest_path}') - output_path = os.path.join(package_dir, "src", "weights.rs") - template = config['template'] - else: - default_path = f"./{config['path']}/src/weights" - xcm_path = f"./{config['path']}/src/weights/xcm" - output_path = default_path - if pallet.startswith("pallet_xcm_benchmarks"): - template = config['template'] - output_path = xcm_path - - print(f'-- benchmarking {pallet} in {runtime} into {output_path}') - cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \ - f"--extrinsic=* " \ - f"--chain={chain} " \ - f"--pallet={pallet} " \ - f"--header={header_path} " \ - f"--output={output_path} " \ - f"--wasm-execution=compiled " \ - f"--steps=50 " \ - f"--repeat=20 " \ - f"--heap-pages=4096 " \ - f"{f'--template={template} ' if template else ''}" \ - f"--no-storage-info --no-min-squares --no-median-slopes " - print(f'-- Running: {cmd} \n') - status = os.system(cmd) - - if status != 0 and args.fail_fast: - print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') - sys.exit(1) - - # Otherwise collect failed benchmarks and print them at the end - # push failed pallets to failed_benchmarks - if status != 0: - failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] - else: - successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] - - if failed_benchmarks: - print_and_log('❌ Failed benchmarks of runtimes/pallets:') - for runtime, pallets in failed_benchmarks.items(): - print_and_log(f'-- {runtime}: {pallets}') - - if 
successful_benchmarks: - print_and_log('✅ Successful benchmarks of runtimes/pallets:') - for runtime, pallets in successful_benchmarks.items(): - print_and_log(f'-- {runtime}: {pallets}') elif args.command == 'fmt': command = f"cargo +nightly fmt" diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py index 68998b989909..7b29fbfe90d8 100644 --- a/.github/scripts/cmd/test_cmd.py +++ b/.github/scripts/cmd/test_cmd.py @@ -47,7 +47,7 @@ def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None): return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ - f"--runtime=target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ f"--pallet={pallets} --header={header} " \ f"--output={output_path} " \ f"--wasm-execution=compiled " \ @@ -93,7 +93,7 @@ def tearDown(self): def test_bench_command_normal_execution_all_runtimes(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), pallet=['pallet_balances'], fail_fast=True, @@ -117,10 +117,10 @@ def test_bench_command_normal_execution_all_runtimes(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), - call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), - call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build 
-p rococo-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"), call(get_mock_bench_output( runtime='kitchensink', @@ -150,7 +150,7 @@ def test_bench_command_normal_execution_all_runtimes(self): def test_bench_command_normal_execution(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=['westend'], pallet=['pallet_balances', 'pallet_staking'], fail_fast=True, @@ -170,7 +170,7 @@ def test_bench_command_normal_execution(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( @@ -193,7 +193,7 @@ def test_bench_command_normal_execution(self): def test_bench_command_normal_execution_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=['westend'], pallet=['pallet_xcm_benchmarks::generic'], fail_fast=True, @@ -213,7 +213,7 @@ def test_bench_command_normal_execution_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( @@ -229,7 +229,7 @@ def test_bench_command_normal_execution_xcm(self): def test_bench_command_two_runtimes_two_pallets(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=['westend', 'rococo'], pallet=['pallet_balances', 'pallet_staking'], fail_fast=True, @@ -250,8 +250,8 @@ def test_bench_command_two_runtimes_two_pallets(self): expected_calls = [ # Build calls - call("forklift cargo 
build -p westend-runtime --profile production --features=runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( runtime='westend', @@ -287,7 +287,7 @@ def test_bench_command_two_runtimes_two_pallets(self): def test_bench_command_one_dev_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=['dev'], pallet=['pallet_balances'], fail_fast=True, @@ -309,7 +309,7 @@ def test_bench_command_one_dev_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( runtime='kitchensink', @@ -324,7 +324,7 @@ def test_bench_command_one_dev_runtime(self): def test_bench_command_one_cumulus_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench-omni', + command='bench', runtime=['asset-hub-westend'], pallet=['pallet_assets'], fail_fast=True, @@ -344,7 +344,7 @@ def test_bench_command_one_cumulus_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( runtime='asset-hub-westend', @@ -359,7 +359,7 @@ def test_bench_command_one_cumulus_runtime(self): def test_bench_command_one_cumulus_runtime_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - 
command='bench-omni', + command='bench', runtime=['asset-hub-westend'], pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'], fail_fast=True, @@ -379,7 +379,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( runtime='asset-hub-westend', diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index c9be21e45dcb..e3dd6224f29b 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,52 +237,24 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch deb package from S3. Assumes the ENV are set: +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot -fetch_debian_package_from_s3() { +fetch_release_artifacts_from_s3() { BINARY=$1 echo "Version : $VERSION" echo "Repo : $REPO" echo "Binary : $BINARY" - echo "Tag : $RELEASE_TAG" OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" URL_BASE=$(get_s3_url_base $BINARY) echo "URL_BASE=$URL_BASE" - URL=$URL_BASE/$RELEASE_TAG/x86_64-unknown-linux-gnu/${BINARY}_${VERSION}_amd64.deb - - mkdir -p "$OUTPUT_DIR" - pushd "$OUTPUT_DIR" > /dev/null - - echo "Fetching deb package..." - - echo "Fetching %s" "$URL" - curl --progress-bar -LO "$URL" || echo "Missing $URL" - - pwd - ls -al --color - popd > /dev/null - -} - -# Fetch the release artifacts like binary and signatures from S3. 
Assumes the ENV are set: -# inputs: binary (polkadot), target(aarch64-apple-darwin) -fetch_release_artifacts_from_s3() { - BINARY=$1 - TARGET=$2 - OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"} - echo "OUTPUT_DIR : $OUTPUT_DIR" - - URL_BASE=$(get_s3_url_base $BINARY) - echo "URL_BASE=$URL_BASE" - - URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY - URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256 - URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc + URL_BINARY=$URL_BASE/$VERSION/$BINARY + URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$BINARY.asc # Fetch artifacts mkdir -p "$OUTPUT_DIR" @@ -297,6 +269,7 @@ fetch_release_artifacts_from_s3() { pwd ls -al --color popd > /dev/null + } # Pass the name of the binary as input, it will @@ -304,26 +277,15 @@ fetch_release_artifacts_from_s3() { function get_s3_url_base() { name=$1 case $name in - polkadot | polkadot-execute-worker | polkadot-prepare-worker ) + polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) printf "https://releases.parity.io/polkadot" ;; - polkadot-parachain) - printf "https://releases.parity.io/polkadot-parachain" - ;; - - polkadot-omni-node) - printf "https://releases.parity.io/polkadot-omni-node" + polkadot-parachain) + printf "https://releases.parity.io/cumulus" ;; - chain-spec-builder) - printf "https://releases.parity.io/chain-spec-builder" - ;; - - frame-omni-bencher) - printf "https://releases.parity.io/frame-omni-bencher" - ;; - *) + *) printf "UNSUPPORTED BINARY $name" exit 1 ;; @@ -506,16 +468,3 @@ validate_stable_tag() { exit 1 fi } - -# Prepare docker stable tag form the polkadot stable tag -# input: tag (polkaodot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX) -# output: stableYYMM(-X) or stableYYMM(-X)-rcX -prepare_docker_stable_tag() { - tag="$1" - if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? 
]]; then - echo "${BASH_REMATCH[0]}" - else - echo "Tag is invalid: $tag" - exit 1 - fi -} diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py index 9154f185e64b..780fa0012976 100644 --- a/.github/scripts/generate-prdoc.py +++ b/.github/scripts/generate-prdoc.py @@ -36,21 +36,6 @@ def from_pr_number(n, audience, bump, force): create_prdoc(n, audience, pr.title, pr.body, patch, bump, force) -def translate_audience(audience): - aliases = { - 'runtime_dev': 'Runtime Dev', - 'runtime_user': 'Runtime Operator', - 'node_dev': 'Node Dev', - 'node_user': 'Node User', - } - - if audience in aliases: - to = aliases[audience] - print(f"Translated audience '{audience}' to '{to}'") - audience = to - - return audience - def create_prdoc(pr, audience, title, description, patch, bump, force): path = f"prdoc/pr_{pr}.prdoc" @@ -64,7 +49,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): print(f"No preexisting PrDoc for PR {pr}") prdoc = { "title": title, "doc": [{}], "crates": [] } - audience = translate_audience(audience) prdoc["doc"][0]["audience"] = audience prdoc["doc"][0]["description"] = description @@ -133,7 +117,7 @@ def setup_parser(parser=None, pr_required=True): parser = argparse.ArgumentParser() parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.") parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev") - parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "none"], help="A default bump level for all crates. Example: --bump patch") + parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "no_change"], help="A default bump level for all crates. 
Example: --bump patch") parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.") return parser diff --git a/.github/scripts/release/build-linux-release.sh b/.github/scripts/release/build-linux-release.sh index 874c9b44788b..a6bd658d292a 100755 --- a/.github/scripts/release/build-linux-release.sh +++ b/.github/scripts/release/build-linux-release.sh @@ -3,8 +3,6 @@ # This is used to build our binaries: # - polkadot # - polkadot-parachain -# - polkadot-omni-node -# # set -e BIN=$1 @@ -23,7 +21,7 @@ time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PAC echo "Artifact target: $ARTIFACTS" cp ./target/$PROFILE/$BIN "$ARTIFACTS" -pushd "$ARTIFACTS" > /dev/null +pushd "$ARTIFACTS" > /dev/nul sha256sum "$BIN" | tee "$BIN.sha256" EXTRATAG="$($ARTIFACTS/$BIN --version | diff --git a/.github/scripts/release/build-macos-release.sh b/.github/scripts/release/build-macos-release.sh deleted file mode 100755 index ba6dcc65d650..000000000000 --- a/.github/scripts/release/build-macos-release.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -# This is used to build our binaries: -# - polkadot -# - polkadot-parachain -# - polkadot-omni-node -# set -e - -BIN=$1 -PACKAGE=${2:-$BIN} - -PROFILE=${PROFILE:-production} -# parity-macos runner needs a path where it can -# write, so make it relative to github workspace. 
-ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN -VERSION=$(git tag -l --contains HEAD | grep -E "^v.*") - -echo "Artifacts will be copied into $ARTIFACTS" -mkdir -p "$ARTIFACTS" - -git log --pretty=oneline -n 1 -time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE - -echo "Artifact target: $ARTIFACTS" - -cp ./target/$PROFILE/$BIN "$ARTIFACTS" -pushd "$ARTIFACTS" > /dev/null -sha256sum "$BIN" | tee "$BIN.sha256" - -EXTRATAG="$($ARTIFACTS/$BIN --version | - sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')" - -EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)" - -echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})" -echo -n ${VERSION} > "$ARTIFACTS/VERSION" -echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG" diff --git a/.github/scripts/release/distributions b/.github/scripts/release/distributions deleted file mode 100644 index a430ec76c6ba..000000000000 --- a/.github/scripts/release/distributions +++ /dev/null @@ -1,39 +0,0 @@ -Origin: Parity -Label: Parity -Codename: release -Architectures: amd64 -Components: main -Description: Apt repository for software made by Parity Technologies Ltd. -SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE - -Origin: Parity -Label: Parity Staging -Codename: staging -Architectures: amd64 -Components: main -Description: Staging distribution for Parity Technologies Ltd. packages -SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE - -Origin: Parity -Label: Parity stable2407 -Codename: stable2407 -Architectures: amd64 -Components: main -Description: Apt repository for software made by Parity Technologies Ltd. -SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE - -Origin: Parity -Label: Parity stable2409 -Codename: stable2409 -Architectures: amd64 -Components: main -Description: Apt repository for software made by Parity Technologies Ltd. 
-SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE - -Origin: Parity -Label: Parity stable2412 -Codename: stable2412 -Architectures: amd64 -Components: main -Description: Apt repository for software made by Parity Technologies Ltd. -SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index 984709f2ea03..f5032073b617 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Set the new version by replacing the value of the constant given as pattern +# Set the new version by replacing the value of the constant given as patetrn # in the file. # # input: pattern, version, file @@ -119,79 +119,21 @@ set_polkadot_parachain_binary_version() { upload_s3_release() { - alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' - - product=$1 - version=$2 - target=$3 - - echo "Working on product: $product " - echo "Working on version: $version " - echo "Working on platform: $target " - - URL_BASE=$(get_s3_url_base $product) - - echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true - echo "Content to be uploaded:" - artifacts="release-artifacts/$target/$product/" - ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}" - echo "Uploaded files:" - aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize - echo "✅ The release should be at https://${URL_BASE}/${version}/${target}" -} - -# Upload runtimes artifacts to s3 release bucket -# -# input: version (stable release tage.g. 
polkadot-stable2412 or polkadot-stable2412-rc1) -# output: none -upload_s3_runtimes_release_artifacts() { alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' - version=$1 + product=$1 + version=$2 + echo "Working on product: $product " echo "Working on version: $version " echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true + aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize || true echo "Content to be uploaded:" - artifacts="artifacts/runtimes/" + artifacts="artifacts/$product/" ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/${version}/" echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" -} - - -# Pass the name of the binary as input, it will -# return the s3 base url -function get_s3_url_base() { - name=$1 - case $name in - polkadot | polkadot-execute-worker | polkadot-prepare-worker ) - printf "releases.parity.io/polkadot" - ;; - - polkadot-parachain) - printf "releases.parity.io/polkadot-parachain" - ;; - - polkadot-omni-node) - printf "releases.parity.io/polkadot-omni-node" - ;; - - chain-spec-builder) - printf "releases.parity.io/chain-spec-builder" - ;; - - frame-omni-bencher) - printf "releases.parity.io/frame-omni-bencher" - ;; - *) - printf "UNSUPPORTED BINARY $name" - exit 1 - ;; - esac + aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/polkadot/${version}" } diff --git 
a/.github/workflows/benchmarks-networking.yml b/.github/workflows/benchmarks-networking.yml deleted file mode 100644 index 79494b9a015c..000000000000 --- a/.github/workflows/benchmarks-networking.yml +++ /dev/null @@ -1,109 +0,0 @@ -name: Networking Benchmarks - -on: - push: - branches: - - master - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - preflight: - uses: ./.github/workflows/reusable-preflight.yml - - build: - timeout-minutes: 50 - needs: [preflight] - runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }} - container: - image: ${{ needs.preflight.outputs.IMAGE }} - strategy: - fail-fast: false - matrix: - features: - [ - { bench: "notifications_protocol" }, - { bench: "request_response_protocol" }, - ] - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run Benchmarks - id: run-benchmarks - run: | - mkdir -p ./charts - forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/${{ matrix.features.bench }}.txt || echo "Benchmarks failed" - ls -lsa ./charts - - - name: Upload artifacts - uses: actions/upload-artifact@v4.3.6 - with: - name: ${{ matrix.features.bench }}-${{ github.sha }} - path: ./charts - - publish-benchmarks: - timeout-minutes: 60 - needs: [build] - if: github.ref == 'refs/heads/master' - environment: subsystem-benchmarks - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - ref: gh-pages - fetch-depth: 0 - - - run: git checkout master -- - - - name: Download artifacts - uses: actions/download-artifact@v4.1.8 - with: - name: notifications_protocol-${{ github.sha }} - path: ./charts - - - name: Download artifacts - uses: actions/download-artifact@v4.1.8 - with: - name: request_response_protocol-${{ github.sha }} - path: ./charts - - - name: Setup git - run: | - # Fixes "detected dubious ownership" 
error in the ci - git config --global --add safe.directory '*' - ls -lsR ./charts - - - uses: actions/create-github-app-token@v1 - id: app-token - with: - app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} - private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} - - - name: Generate ${{ env.BENCH }} - env: - BENCH: notifications_protocol - uses: benchmark-action/github-action-benchmark@v1 - with: - tool: "cargo" - output-file-path: ./charts/${{ env.BENCH }}.txt - benchmark-data-dir-path: ./bench/${{ env.BENCH }} - github-token: ${{ steps.app-token.outputs.token }} - auto-push: true - - - name: Generate ${{ env.BENCH }} - env: - BENCH: request_response_protocol - uses: benchmark-action/github-action-benchmark@v1 - with: - tool: "cargo" - output-file-path: ./charts/${{ env.BENCH }}.txt - benchmark-data-dir-path: ./bench/${{ env.BENCH }} - github-token: ${{ steps.app-token.outputs.token }} - auto-push: true diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml index 335c26282027..2a8e81b97878 100644 --- a/.github/workflows/build-misc.yml +++ b/.github/workflows/build-misc.yml @@ -16,11 +16,12 @@ permissions: contents: read jobs: + preflight: uses: ./.github/workflows/reusable-preflight.yml build-runtimes-polkavm: - timeout-minutes: 60 + timeout-minutes: 20 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} container: @@ -37,14 +38,11 @@ jobs: - name: Build env: SUBSTRATE_RUNTIME_TARGET: riscv - id: required - run: forklift cargo check -p minimal-template-runtime -p westend-runtime -p rococo-runtime -p polkadot-test-runtime - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + run: | + forklift cargo check -p minimal-template-runtime + forklift cargo check -p 
westend-runtime + forklift cargo check -p rococo-runtime + forklift cargo check -p polkadot-test-runtime build-subkey: timeout-minutes: 20 @@ -64,16 +62,9 @@ jobs: - name: Build env: SKIP_WASM_BUILD: 1 - id: required run: | cd ./substrate/bin/utils/subkey forklift cargo build --locked --release - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} confirm-required-build-misc-jobs-passed: runs-on: ubuntu-latest diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml index bc0ff82b6774..924a8b7f712f 100644 --- a/.github/workflows/check-frame-omni-bencher.yml +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -36,16 +36,9 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - id: required run: | forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} runtime-matrix: runs-on: ubuntu-latest @@ -87,7 +80,6 @@ jobs: uses: actions/checkout@v4 - name: script - id: required run: | RUNTIME_BLOB_NAME=$(echo $PACKAGE_NAME | sed 's/-/_/g').compact.compressed.wasm RUNTIME_BLOB_PATH=./target/release/wbuild/$PACKAGE_NAME/$RUNTIME_BLOB_NAME @@ -98,13 +90,6 @@ jobs: 
cmd="./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 $FLAGS" echo "Running command: $cmd" eval "$cmd" - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} - confirm-frame-omni-benchers-passed: runs-on: ubuntu-latest name: All benchmarks passed diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index cea6b9a8636a..dd9d3eaf824f 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -33,7 +33,7 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker - uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # for v1.9.1 (10. Jan 2024) + uses: lycheeverse/lychee-action@7cd0af4c74a61395d455af97419279d86aafaede # for v1.9.1 (10. 
Jan 2024) with: args: >- --config .config/lychee.toml diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 9866ae18b98a..758de0e7b433 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -101,29 +101,20 @@ jobs: ./try-runtime create-snapshot --uri ${{ matrix.uri }} snapshot.raw - name: Build Runtime - id: required1 run: | echo "---------- Building ${{ matrix.package }} runtime ----------" - forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime -q + time forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime -q - name: Run Check - id: required2 run: | echo "Running ${{ matrix.network }} runtime migration check" export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Executing on-runtime-upgrade for ${{ matrix.network }} ----------" - ./try-runtime ${{ matrix.command_extra_args }} \ + time ./try-runtime ${{ matrix.command_extra_args }} \ --runtime ./target/release/wbuild/${{ matrix.package }}/${{ matrix.wasm }} \ on-runtime-upgrade --disable-spec-version-check --checks=all ${{ matrix.subcommand_extra_args }} snap -p snapshot.raw sleep 5 - - name: Stop all workflows if failed - if: ${{ failure() && (steps.required1.conclusion == 'failure' || steps.required2.conclusion == 'failure') }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} - # name of this job must be unique across all workflows # otherwise GitHub will mark all these jobs as required confirm-required-checks-passed: diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 0da3e54ef60b..78602410cdf6 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -2,7 +2,7 @@ name: Check semver on: pull_request: - types: [opened, synchronize, 
reopened, ready_for_review, labeled, unlabeled] + types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: merge_group: @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true env: - TOOLCHAIN: nightly-2024-11-19 + TOOLCHAIN: nightly-2024-06-01 jobs: preflight: @@ -62,35 +62,22 @@ jobs: echo "PRDOC_EXTRA_ARGS=--max-bump minor" >> $GITHUB_ENV - - name: Echo Skip - if: ${{ contains(github.event.pull_request.labels.*.name, 'R0-silent') }} - run: echo "Skipping this PR because it is labeled as R0-silent." - - name: Rust Cache - if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }} uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: cache-on-failure: true - name: Rust compilation prerequisites - if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }} run: | rustup default $TOOLCHAIN rustup component add rust-src --toolchain $TOOLCHAIN - name: install parity-publish - if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }} # Set the target dir to cache the build. 
- run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q + run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.8.0 --locked -q - name: check semver - if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }} run: | - if [ -z "$PR" ]; then - echo "Skipping master/merge queue" - exit 0 - fi - export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 4c26b85a6303..36deba7dfb78 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -15,6 +15,7 @@ concurrency: permissions: {} jobs: + preflight: uses: ./.github/workflows/reusable-preflight.yml @@ -27,14 +28,7 @@ jobs: steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Cargo fmt - id: required run: cargo +nightly fmt --all -- --check - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} check-dependency-rules: runs-on: ubuntu-latest timeout-minutes: 20 @@ -97,6 +91,7 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" + "substrate/frame/revive/fixtures/build" "substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . 
@@ -177,32 +172,6 @@ jobs: env: ASSERT_REGEX: "FAIL-CI" GIT_DEPTH: 1 - check-readme: - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - - uses: actions/checkout@v4 - - - name: Install prerequisites - run: | - sudo apt-get update - sudo apt-get install -y protobuf-compiler - - - name: Set rust version from env file - run: | - RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') - echo $RUST_VERSION - echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV - - - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 - with: - cache: false - toolchain: ${{ env.RUST_VERSION }} - components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std - - - name: Find README.docify.md files and check generated READMEs - run: .github/scripts/check-missing-readme-generation.sh confirm-required-checks-quick-jobs-passed: runs-on: ubuntu-latest @@ -218,7 +187,6 @@ jobs: - check-markdown - check-umbrella - check-fail-ci - - check-readme if: always() && !cancelled() steps: - run: | diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 02428711811f..8ec3660307d4 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -31,17 +31,9 @@ jobs: steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script - id: required run: | cargo clippy --all-targets --locked --workspace --quiet cargo clippy --all-targets --all-features --locked --workspace --quiet - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} - check-try-runtime: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] @@ -52,7 +44,6 @@ jobs: steps: - uses: 
actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script - id: required run: | forklift cargo check --locked --all --features try-runtime --quiet # this is taken from cumulus @@ -61,13 +52,6 @@ jobs: # add after https://github.com/paritytech/substrate/pull/14502 is merged # experimental code may rely on try-runtime and vice-versa forklift cargo check --locked --all --features try-runtime,experimental --quiet - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} - # check-core-crypto-features works fast without forklift check-core-crypto-features: runs-on: ${{ needs.preflight.outputs.RUNNER }} @@ -79,7 +63,6 @@ jobs: steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script - id: required run: | cd substrate/primitives/core ./check-features-variants.sh @@ -90,12 +73,6 @@ jobs: cd substrate/primitives/keyring ./check-features-variants.sh cd - - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} # name of this job must be unique across all workflows # otherwise GitHub will mark all these jobs as required confirm-required-checks-passed: diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 42b2eab3b9e4..525ab0c0fc23 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -19,10 +19,10 @@ jobs: steps: - name: Generate token id: generate_token - uses: actions/create-github-app-token@v1 + uses: tibdex/github-app-token@v2.1.0 with: - app-id: ${{ 
secrets.CMD_BOT_APP_ID }} - private-key: ${{ secrets.CMD_BOT_APP_KEY }} + app_id: ${{ secrets.CMD_BOT_APP_ID }} + private_key: ${{ secrets.CMD_BOT_APP_KEY }} - name: Check if user is a member of the organization id: is-member @@ -227,8 +227,7 @@ jobs: cat .github/env >> $GITHUB_OUTPUT if [ -n "$IMAGE_OVERRIDE" ]; then - IMAGE=$IMAGE_OVERRIDE - echo "IMAGE=$IMAGE" >> $GITHUB_OUTPUT + echo "IMAGE=$IMAGE_OVERRIDE" >> $GITHUB_OUTPUT fi if [[ $BODY == "/cmd bench"* ]]; then @@ -238,10 +237,6 @@ jobs: else echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT fi - - name: Print outputs - run: | - echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}" - echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}" # Get PR branch name, because the issue_comment event does not contain the PR branch name get-pr-branch: @@ -288,24 +283,10 @@ jobs: env: JOB_NAME: "cmd" runs-on: ${{ needs.set-image.outputs.RUNNER }} + timeout-minutes: 4320 # 72 hours -> 3 days; as it could take a long time to run all the runtimes/pallets container: image: ${{ needs.set-image.outputs.IMAGE }} - timeout-minutes: 1440 # 24 hours per runtime steps: - - name: Generate token - uses: actions/create-github-app-token@v1 - id: generate_token - with: - app-id: ${{ secrets.CMD_BOT_APP_ID }} - private-key: ${{ secrets.CMD_BOT_APP_KEY }} - - - name: Checkout - uses: actions/checkout@v4 - with: - token: ${{ steps.generate_token.outputs.token }} - repository: ${{ needs.get-pr-branch.outputs.repo }} - ref: ${{ needs.get-pr-branch.outputs.pr-branch }} - - name: Get command uses: actions-ecosystem/action-regex-match@v2 id: get-pr-comment @@ -359,7 +340,13 @@ jobs: repo: context.repo.repo, body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})` }) - + + - name: Checkout + uses: actions/checkout@v4 + with: + repository: ${{ needs.get-pr-branch.outputs.repo }} + ref: ${{ needs.get-pr-branch.outputs.pr-branch }} + - name: Install dependencies for bench if: 
startsWith(steps.get-pr-comment.outputs.group2, 'bench') run: | @@ -377,7 +364,6 @@ jobs: # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory '*' git remote -v - cat /proc/cpuinfo python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG git status @@ -400,41 +386,19 @@ jobs: name: command-output path: /tmp/cmd/command_output.log - # Generate token for commit, as the earlier token expires after 1 hour, while cmd can take longer - - name: Generate token for commit - uses: actions/create-github-app-token@v1 - id: generate_token_commit - with: - app-id: ${{ secrets.CMD_BOT_APP_ID }} - private-key: ${{ secrets.CMD_BOT_APP_KEY }} - - name: Commit changes run: | if [ -n "$(git status --porcelain)" ]; then - git config --global user.name command-bot - git config --global user.email "<>" - git config --global pull.rebase false - - # Push the results to the target branch - git remote add \ - github \ - "https://x-access-token:${{ steps.generate_token_commit.outputs.token }}@github.com/${{ needs.get-pr-branch.outputs.repo }}.git" || : - - push_changes() { - git push github "HEAD:${{ needs.get-pr-branch.outputs.pr-branch }}" - } + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" git add . git restore --staged Cargo.lock # ignore changes in Cargo.lock git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true - # Attempt to push changes - if ! push_changes; then - echo "Push failed, trying to rebase..." 
- git pull --rebase github "${{ needs.get-pr-branch.outputs.pr-branch }}" - # After successful rebase, try pushing again - push_changes - fi + git pull --rebase origin ${{ needs.get-pr-branch.outputs.pr-branch }} + + git push origin ${{ needs.get-pr-branch.outputs.pr-branch }} else echo "Nothing to commit"; fi diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index 8a017a434525..8f23bcd75f01 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -40,7 +40,7 @@ jobs: uses: korthout/backport-action@v3 id: backport with: - target_branches: stable2407 stable2409 stable2412 + target_branches: stable2407 stable2409 merge_commits: skip github_token: ${{ steps.generate_token.outputs.token }} pull_description: | @@ -86,7 +86,7 @@ jobs: const reviewer = '${{ github.event.pull_request.user.login }}'; for (const pullNumber of pullNumbers) { - await github.pulls.requestReviewers({ + await github.pulls.createReviewRequest({ owner: context.repo.owner, repo: context.repo.repo, pull_number: parseInt(pullNumber), diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml index 71dbcfbd2289..7022e8e0e006 100644 --- a/.github/workflows/command-prdoc.yml +++ b/.github/workflows/command-prdoc.yml @@ -14,7 +14,7 @@ on: required: true options: - "TODO" - - "none" + - "no_change" - "patch" - "minor" - "major" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b7c70c9e6d66..a257c8229598 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -25,15 +25,8 @@ jobs: steps: - uses: actions/checkout@v4 - run: forklift cargo test --doc --workspace - id: required env: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ 
secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} build-rustdoc: runs-on: ${{ needs.preflight.outputs.RUNNER }} @@ -45,7 +38,6 @@ jobs: steps: - uses: actions/checkout@v4 - run: forklift cargo doc --all-features --workspace --no-deps - id: required env: SKIP_WASM_BUILD: 1 RUSTDOCFLAGS: "-Dwarnings --default-theme=ayu --html-in-header ./docs/sdk/assets/header.html --extend-css ./docs/sdk/assets/theme.css --html-after-content ./docs/sdk/assets/after-content.html" @@ -68,12 +60,6 @@ jobs: path: ./crate-docs/ retention-days: 1 if-no-files-found: error - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} build-implementers-guide: runs-on: ubuntu-latest diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml deleted file mode 100644 index ce1b2cb231d0..000000000000 --- a/.github/workflows/publish-check-compile.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Check publish build - -on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - preflight: - uses: ./.github/workflows/reusable-preflight.yml - - check-publish-compile: - timeout-minutes: 90 - needs: [preflight] - runs-on: ${{ needs.preflight.outputs.RUNNER }} - container: - image: ${{ needs.preflight.outputs.IMAGE }} - steps: - - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - - - name: Rust Cache - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - with: - cache-on-failure: true - - - name: install parity-publish 
- run: cargo install parity-publish@0.10.3 --locked -q - - - name: parity-publish update plan - run: parity-publish --color always plan --skip-check --prdoc prdoc/ - - - name: parity-publish apply plan - run: parity-publish --color always apply --registry - - - name: parity-publish check compile - run: | - packages="$(parity-publish apply --print)" - - if [ -n "$packages" ]; then - cargo --color always check $(printf -- '-p %s ' $packages) - fi diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 3150cb9dd405..3fad3b641474 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -24,7 +24,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.10.3 --locked -q + run: cargo install parity-publish@0.8.0 --locked -q - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index a6efc8a5599e..37bf06bb82d8 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.10.3 --locked -q + run: cargo install parity-publish@0.8.0 --locked -q - name: parity-publish claim env: diff --git a/.github/workflows/release-11_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml similarity index 100% rename from .github/workflows/release-11_rc-automation.yml rename to .github/workflows/release-10_rc-automation.yml diff --git a/.github/workflows/release-20_build-rc.yml b/.github/workflows/release-20_build-rc.yml deleted file mode 100644 index d4c7055c37c5..000000000000 --- a/.github/workflows/release-20_build-rc.yml +++ /dev/null @@ -1,263 +0,0 @@ -name: Release - Build node release candidate - -on: - workflow_dispatch: - inputs: - 
binary: - description: Binary to be build for the release - default: all - type: choice - options: - - polkadot - - polkadot-parachain - - polkadot-omni-node - - frame-omni-bencher - - chain-spec-builder - - all - - release_tag: - description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) - type: string - -jobs: - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - - validate-inputs: - needs: [check-synchronization] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' - runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.validate_inputs.outputs.release_tag }} - - steps: - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - - name: Validate inputs - id: validate_inputs - run: | - . ./.github/scripts/common/lib.sh - - RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) - echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT - - build-polkadot-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' - package: polkadot - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: x86_64-unknown-linux-gnu - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-parachain-binary: - needs: [validate-inputs] - if: ${{ 
inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot-parachain"]' - package: "polkadot-parachain-bin" - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: x86_64-unknown-linux-gnu - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-omni-node-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot-omni-node"]' - package: "polkadot-omni-node" - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: x86_64-unknown-linux-gnu - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-frame-omni-bencher-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["frame-omni-bencher"]' - package: "frame-omni-bencher" - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - 
target: x86_64-unknown-linux-gnu - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-chain-spec-builder-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["chain-spec-builder"]' - package: staging-chain-spec-builder - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: x86_64-unknown-linux-gnu - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-macos-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' - package: polkadot - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: aarch64-apple-darwin - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - 
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-parachain-macos-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot-parachain"]' - package: polkadot-parachain-bin - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: aarch64-apple-darwin - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-polkadot-omni-node-macos-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["polkadot-omni-node"]' - package: polkadot-omni-node - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: aarch64-apple-darwin - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: 
read - - build-frame-omni-bencher-macos-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["frame-omni-bencher"]' - package: frame-omni-bencher - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: aarch64-apple-darwin - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read - - build-chain-spec-builder-macos-binary: - needs: [validate-inputs] - if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} - uses: "./.github/workflows/release-reusable-rc-buid.yml" - with: - binary: '["chain-spec-builder"]' - package: staging-chain-spec-builder - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: aarch64-apple-darwin - secrets: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - permissions: - id-token: write - attestations: write - contents: read diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 78ceea91f100..73d1aeaa4009 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ 
b/.github/workflows/release-30_publish_release_draft.yml @@ -1,46 +1,19 @@ name: Release - Publish draft -# This workflow runs in paritytech-release and creates full release draft with: -# - release notes -# - info about the runtimes -# - attached artifacts: -# - runtimes -# - binaries -# - signatures - on: + push: + tags: + # Catches v1.2.3 and v1.2.3-rc1 + - v[0-9]+.[0-9]+.[0-9]+* + # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled + workflow_dispatch: inputs: - release_tag: - description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) - required: true - type: string + version: + description: Current release/rc version jobs: - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - - validate-inputs: - needs: [ check-synchronization ] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' - runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.validate_inputs.outputs.release_tag }} - - steps: - - name: Checkout sources - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Validate inputs - id: validate_inputs - run: | - . 
./.github/scripts/common/lib.sh - - RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) - echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT - get-rust-versions: - needs: [ validate-inputs ] runs-on: ubuntu-latest outputs: rustc-stable: ${{ steps.get-rust-versions.outputs.stable }} @@ -51,35 +24,54 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: - needs: [ validate-inputs ] uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first" build_opts: "--features on-chain-release-build" - profile: production - permissions: - id-token: write - attestations: write - contents: read + + build-binaries: + runs-on: ubuntu-latest + strategy: + matrix: + # Tuples of [package, binary-name] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder], [polkadot-omni-node, polkadot-omni-node] ] + steps: + - name: Checkout sources + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + + - name: Install protobuf-compiler + run: | + sudo apt update + sudo apt install -y protobuf-compiler + + - name: Build ${{ matrix.binary[1] }} binary + run: | + cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} + target/production/${{ matrix.binary[1] }} --version + + - name: Upload ${{ matrix.binary[1] }} binary + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binary[1] }} + path: target/production/${{ matrix.binary[1] }} + publish-release-draft: runs-on: ubuntu-latest - environment: release - needs: [ validate-inputs, get-rust-versions, build-runtimes ] + needs: [ get-rust-versions, build-runtimes ] 
outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} - steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Prepare tooling run: | - URL=https://github.com/chevdor/tera-cli/releases/download/v0.4.0/tera-cli_linux_amd64.deb + URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb @@ -95,21 +87,20 @@ jobs: GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json - RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} shell: bash run: | . 
./.github/scripts/common/lib.sh export REF1=$(get_latest_release_tag) - if [[ -z "$RELEASE_TAG" ]]; then + if [[ -z "${{ inputs.version }}" ]]; then export REF2="${{ github.ref_name }}" echo "REF2: ${REF2}" else - export REF2="$RELEASE_TAG" + export REF2="${{ inputs.version }}" echo "REF2: ${REF2}" fi echo "REL_TAG=$REF2" >> $GITHUB_ENV - export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/') + export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/') ./scripts/release/build-changelogs.sh @@ -121,29 +112,19 @@ jobs: scripts/release/context.json **/*-srtool-digest.json - - name: Generate content write token for the release automation - id: generate_write_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} - private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} - owner: paritytech - repositories: polkadot-sdk - - name: Create draft release id: create-release + uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 env: - GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} - run: | - gh release create ${{ env.REL_TAG }} \ - --repo paritytech/polkadot-sdk \ - --draft \ - --title "Polkadot ${{ env.REL_TAG }}" \ - --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ env.REL_TAG }} + release_name: Polkadot ${{ env.REL_TAG }} + body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md + draft: true publish-runtimes: - needs: [ validate-inputs, build-runtimes, publish-release-draft ] - environment: release + needs: [ build-runtimes, publish-release-draft ] continue-on-error: true runs-on: ubuntu-latest strategy: @@ -151,7 +132,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - name: Download artifacts uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -163,83 +144,44 @@ jobs: >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm') >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion) - - name: Generate content write token for the release automation - id: generate_write_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} - private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} - owner: paritytech - repositories: polkadot-sdk - - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm + if: ${{ matrix.chain != 'rococo-parachain' }} + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} - run: | - gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ - --repo paritytech/polkadot-sdk \ - '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} + asset_path: ${{ env.ASSET }} + asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm + asset_content_type: application/wasm - publish-release-artifacts: - needs: [ validate-inputs, publish-release-draft ] - environment: release + publish-binaries: + needs: [ publish-release-draft, build-binaries ] continue-on-error: true runs-on: ubuntu-latest strategy: matrix: - binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ] - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + binary: [frame-omni-bencher, chain-spec-builder, polkadot-omni-node] steps: - - name: Checkout sources - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - 
name: Fetch binaries from s3 based on version - run: | - . ./.github/scripts/common/lib.sh - - VERSION="${{ needs.validate-inputs.outputs.release_tag }}" - fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }} - - - name: Rename aarch64-apple-darwin binaries - if: ${{ matrix.target == 'aarch64-apple-darwin' }} - working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} - run: | - mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin - mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc - mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256 - - - name: Generate content write token for the release automation - id: generate_write_token - uses: actions/create-github-app-token@v1 + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} - private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} - owner: paritytech - repositories: polkadot-sdk + name: ${{ matrix.binary }} - - name: Upload ${{ matrix.binary }} binary to release draft + - name: Upload ${{ matrix.binary }} binary + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} - working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} - run: | - if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then - gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ - --repo paritytech/polkadot-sdk \ - ${{ matrix.binary }}-aarch64-apple-darwin \ - ${{ matrix.binary }}-aarch64-apple-darwin.asc \ - ${{ matrix.binary }}-aarch64-apple-darwin.sha256 - else - gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ - --repo paritytech/polkadot-sdk \ - ${{ matrix.binary }} \ - ${{ matrix.binary }}.asc \ - ${{ matrix.binary 
}}.sha256 - fi + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} + asset_path: ${{ github.workspace}}/${{ matrix.binary }} + asset_name: ${{ matrix.binary }} + asset_content_type: application/octet-stream post_to_matrix: runs-on: ubuntu-latest - needs: [ validate-inputs, publish-release-draft ] + needs: publish-release-draft environment: release strategy: matrix: @@ -255,5 +197,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
- And release draft is release created in [polkadot-sdk repo](https://github.com/paritytech/polkadot-sdk/releases) + **New version of polkadot tagged**: ${{ github.ref_name }}
+ Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml deleted file mode 100644 index 6aa9d4bddd1d..000000000000 --- a/.github/workflows/release-31_promote-rc-to-final.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: Release - Promote RC to final candidate on S3 - -on: - workflow_dispatch: - inputs: - binary: - description: Binary to be build for the release - default: all - type: choice - options: - - polkadot - - polkadot-parachain - - polkadot-omni-node - - frame-omni-bencher - - chain-spec-builder - - all - release_tag: - description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX - type: string - - -jobs: - - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - - validate-inputs: - needs: [ check-synchronization ] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' - runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.validate_inputs.outputs.release_tag }} - final_tag: ${{ steps.validate_inputs.outputs.final_tag }} - - steps: - - name: Checkout sources - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Validate inputs - id: validate_inputs - run: | - . 
./.github/scripts/common/lib.sh - - RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) - echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT - - promote-polkadot-rc-to-final: - if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} - needs: [ validate-inputs ] - uses: ./.github/workflows/release-reusable-promote-to-final.yml - strategy: - matrix: - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] - with: - package: polkadot - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: ${{ matrix.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - promote-polkadot-parachain-rc-to-final: - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} - needs: [ validate-inputs ] - uses: ./.github/workflows/release-reusable-promote-to-final.yml - strategy: - matrix: - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] - with: - package: polkadot-parachain - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: ${{ matrix.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - promote-polkadot-omni-node-rc-to-final: - if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} - needs: [ validate-inputs ] - uses: ./.github/workflows/release-reusable-promote-to-final.yml - strategy: - matrix: - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] - with: - package: polkadot-omni-node - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: ${{ matrix.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - 
AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - promote-frame-omni-bencher-rc-to-final: - if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} - needs: [ validate-inputs ] - uses: ./.github/workflows/release-reusable-promote-to-final.yml - strategy: - matrix: - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] - with: - package: frame-omni-bencher - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: ${{ matrix.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - promote-chain-spec-builder-rc-to-final: - if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} - needs: [ validate-inputs ] - uses: ./.github/workflows/release-reusable-promote-to-final.yml - strategy: - matrix: - target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] - with: - package: chain-spec-builder - release_tag: ${{ needs.validate-inputs.outputs.release_tag }} - target: ${{ matrix.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-40_publish-deb-package.yml b/.github/workflows/release-40_publish-deb-package.yml deleted file mode 100644 index 3c5411ab16f0..000000000000 --- a/.github/workflows/release-40_publish-deb-package.yml +++ /dev/null @@ -1,152 +0,0 @@ -name: Release - Publish polakdot deb package - -on: - workflow_dispatch: - inputs: - tag: - description: Current final release tag in the format polakdot-stableYYMM or polkadot-stable-YYMM-X - default: polkadot-stable2412 - required: true - type: string - - distribution: - description: Distribution where to publish deb package (release, staging, 
stable2407, etc) - default: staging - required: true - type: string - -jobs: - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - - validate-inputs: - needs: [check-synchronization] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' - runs-on: ubuntu-latest - outputs: - release_tag: ${{ steps.validate_inputs.outputs.release_tag }} - - steps: - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - - name: Validate inputs - id: validate_inputs - run: | - . ./.github/scripts/common/lib.sh - - RELEASE_TAG=$(validate_stable_tag ${{ inputs.tag }}) - echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT - - - fetch-artifacts-from-s3: - runs-on: ubuntu-latest - needs: [validate-inputs] - env: - REPO: ${{ github.repository }} - RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} - outputs: - VERSION: ${{ steps.fetch_artifacts_from_s3.outputs.VERSION }} - - steps: - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - - name: Fetch rc artifacts or release artifacts from s3 based on version - id: fetch_artifacts_from_s3 - run: | - . 
./.github/scripts/common/lib.sh - - VERSION="$(get_polkadot_node_version_from_code)" - echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - - fetch_debian_package_from_s3 polkadot - - - name: Upload artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: release-artifacts - path: release-artifacts/polkadot/*.deb - - publish-deb-package: - runs-on: ubuntu-latest - needs: [fetch-artifacts-from-s3] - environment: release - env: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_DEB_PATH: "s3://releases-package-repos/deb" - LOCAL_DEB_REPO_PATH: ${{ github.workspace }}/deb - VERSION: ${{ needs.fetch-artifacts-from-s3.outputs.VERSION }} - - steps: - - name: Install pgpkkms - run: | - # Install pgpkms that is used to sign built artifacts - python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@1f8555426662ac93a3849480a35449f683b1c89f" - echo "PGPKMS_REPREPRO_PATH=$(which pgpkms-reprepro)" >> $GITHUB_ENV - - - name: Install awscli - run: | - python3 -m pip install awscli - which aws - - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - - name: Import gpg keys - shell: bash - run: | - . ./.github/scripts/common/lib.sh - - import_gpg_keys - - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: release-artifacts - path: release-artifacts - - - name: Setup local deb repo - run: | - sudo apt-get install -y reprepro - which reprepro - - sed -i "s|^SignWith:.*|SignWith: ! 
${PGPKMS_REPREPRO_PATH}|" ${{ github.workspace }}/.github/scripts/release/distributions - - mkdir -p ${{ github.workspace }}/deb/conf - cp ${{ github.workspace }}/.github/scripts/release/distributions ${{ github.workspace }}/deb/conf/distributions - cat ${{ github.workspace }}/deb/conf/distributions - - - name: Sync local deb repo - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - run: | - # Download the current state of the deb repo - aws s3 sync "$AWS_DEB_PATH/db" "$LOCAL_DEB_REPO_PATH/db" - aws s3 sync "$AWS_DEB_PATH/pool" "$LOCAL_DEB_REPO_PATH/pool" - aws s3 sync "$AWS_DEB_PATH/dists" "$LOCAL_DEB_REPO_PATH/dists" - - - name: Add deb package to local repo - env: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - run: | - # Add the new deb to the repo - reprepro -b "$LOCAL_DEB_REPO_PATH" includedeb "${{ inputs.distribution }}" "release-artifacts/polkadot_${VERSION}_amd64.deb" - - - name: Upload updated deb repo - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - run: | - # Upload the updated repo - dists and pool should be publicly readable - aws s3 sync "$LOCAL_DEB_REPO_PATH/pool" "$AWS_DEB_PATH/pool" --acl public-read - aws s3 sync "$LOCAL_DEB_REPO_PATH/dists" "$AWS_DEB_PATH/dists" --acl public-read - aws s3 sync "$LOCAL_DEB_REPO_PATH/db" "$AWS_DEB_PATH/db" - aws s3 sync "$LOCAL_DEB_REPO_PATH/conf" "$AWS_DEB_PATH/conf" - - # Invalidate caches to make sure latest files are served - aws cloudfront create-invalidation --distribution-id E36FKEYWDXAZYJ --paths '/deb/*' diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index a3c49598d6b1..627e53bacd88 100644 --- 
a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -4,6 +4,10 @@ name: Release - Publish Docker Image # It builds and published releases and rc candidates. on: + #TODO: activate automated run later + # release: + # types: + # - published workflow_dispatch: inputs: image_type: @@ -26,6 +30,16 @@ on: - polkadot-parachain - chain-spec-builder + release_id: + description: | + Release ID. + You can find it using the command: + curl -s \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ + jq '.[] | { name: .name, id: .id }' + required: true + type: number + registry: description: Container registry required: true @@ -41,12 +55,12 @@ on: default: parity version: - description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1 + description: version to build/release default: v0.9.18 required: true stable_tag: - description: Tag matching the actual stable release version in the format polkadpt-stableYYMM(-rcX) or plkadot-stableYYMM-X(-rcX) for patch releases + description: Tag matching the actual stable release version in the format stableYYMM or stableYYMM-X for patch releases required: true permissions: @@ -64,15 +78,11 @@ env: IMAGE_TYPE: ${{ inputs.image_type }} jobs: - check-synchronization: - uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main - validate-inputs: - needs: [check-synchronization] - if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' runs-on: ubuntu-latest outputs: version: ${{ steps.validate_inputs.outputs.VERSION }} + release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }} stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} steps: @@ -87,6 +97,11 @@ jobs: VERSION=$(filter_version_from_input "${{ inputs.version }}") echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + echo "RELEASE_ID=${RELEASE_ID}" >> 
$GITHUB_OUTPUT + + echo "Release ID: $RELEASE_ID" + STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT @@ -99,26 +114,50 @@ jobs: - name: Checkout sources uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + #TODO: this step will be needed when automated triggering will work + #this step runs only if the workflow is triggered automatically when new release is published + # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} + # run: | + # mkdir -p release-artifacts && cd release-artifacts + + # for f in $BINARY $BINARY.asc $BINARY.sha256; do + # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" + # echo " - Fetching $f from $URL" + # wget "$URL" -O "$f" + # done + # chmod a+x $BINARY + # ls -al + - name: Fetch rc artifacts or release artifacts from s3 based on version - # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} + #this step runs only if the workflow is triggered manually + if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} run: | . 
./.github/scripts/common/lib.sh - VERSION="${{ needs.validate-inputs.outputs.stable_tag }}" + VERSION="${{ needs.validate-inputs.outputs.VERSION }}" if [[ ${{ inputs.binary }} == 'polkadot' ]]; then bins=(polkadot polkadot-prepare-worker polkadot-execute-worker) for bin in "${bins[@]}"; do - fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu + fetch_release_artifacts_from_s3 $bin done else - fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu + fetch_release_artifacts_from_s3 $BINARY fi + - name: Fetch polkadot-omni-node/chain-spec-builder rc artifacts or release artifacts based on release id + #this step runs only if the workflow is triggered manually and only for chain-spec-builder + if: ${{ env.EVENT_NAME == 'workflow_dispatch' && (inputs.binary == 'polkadot-omni-node' || inputs.binary == 'chain-spec-builder') }} + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}" + fetch_release_artifacts + - name: Upload artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: release-artifacts - path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/* + path: release-artifacts/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} @@ -134,7 +173,7 @@ jobs: uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Check sha256 ${{ env.BINARY }} - # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . 
../.github/scripts/common/lib.sh @@ -143,7 +182,7 @@ jobs: check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -151,29 +190,35 @@ jobs: check_gpg $BINARY - name: Fetch rc commit and tag - working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs - shell: bash run: | - . ../.github/scripts/common/lib.sh + . ./.github/scripts/common/lib.sh + + echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT commit=$(git rev-parse --short HEAD) && \ echo "commit=${commit}" >> $GITHUB_OUTPUT - echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT - echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT + + echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT - name: Fetch release tags working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs - shell: bash run: | - . 
../.github/scripts/common/lib.sh + chmod a+rx $BINARY + + if [[ $BINARY != 'chain-spec-builder' ]]; then + VERSION=$(./$BINARY --version | awk '{ print $2 }' ) + release=$( echo $VERSION | cut -f1 -d- ) + else + release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//') + fi echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT - echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT + echo "release=${release}" >> $GITHUB_OUTPUT + echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT - name: Build Injected Container image for polkadot rc if: ${{ env.BINARY == 'polkadot' }} @@ -297,10 +342,8 @@ jobs: - name: Fetch values id: fetch-data run: | - . ./.github/scripts/common/lib.sh date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "date=$date" >> $GITHUB_OUTPUT - echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build and push id: docker_build @@ -311,7 +354,7 @@ jobs: # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. 
tags: | - parity/polkadot:${{ steps.fetch-data.outputs.stable }} + parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} parity/polkadot:latest parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | diff --git a/.github/workflows/release-10_branchoff-stable.yml b/.github/workflows/release-branchoff-stable.yml similarity index 100% rename from .github/workflows/release-10_branchoff-stable.yml rename to .github/workflows/release-branchoff-stable.yml diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-build-rc.yml new file mode 100644 index 000000000000..94bacf320898 --- /dev/null +++ b/.github/workflows/release-build-rc.yml @@ -0,0 +1,82 @@ +name: Release - Build node release candidate + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be build for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - all + + release_tag: + description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + build-polkadot-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: "polkadot-parachain-bin" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml deleted file mode 100644 index 
ed4a80a01e82..000000000000 --- a/.github/workflows/release-reusable-promote-to-final.yml +++ /dev/null @@ -1,83 +0,0 @@ -name: Promote rc to final - -on: - workflow_call: - inputs: - package: - description: Package to be promoted - required: true - type: string - - release_tag: - description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX taht will be changed to final in form of polkadot-stableYYMM(-X) - required: true - type: string - - target: - description: Target triple for which the artifacts are being uploaded (e.g aarch64-apple-darwin) - required: true - type: string - - secrets: - AWS_DEFAULT_REGION: - required: true - AWS_RELEASE_ACCESS_KEY_ID: - required: true - AWS_RELEASE_SECRET_ACCESS_KEY: - required: true - -jobs: - - promote-release-artifacts: - environment: release - runs-on: ubuntu-latest - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - - steps: - - name: Checkout sources - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Prepare final tag - id: prepare_final_tag - shell: bash - run: | - tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')" - echo $tag - echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT - - - name: Fetch binaries from s3 based on version - run: | - . 
./.github/scripts/common/lib.sh - - VERSION="${{ inputs.release_tag }}" - if [[ ${{ inputs.package }} == 'polkadot' ]]; then - packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) - for package in "${packages[@]}"; do - fetch_release_artifacts_from_s3 $package ${{ inputs.target }} - done - else - fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }} - fi - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ env.AWS_REGION }} - - - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3 - run: | - . ./.github/scripts/release/release_lib.sh - - if [[ ${{ inputs.package }} == 'polkadot' ]]; then - packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) - for package in "${packages[@]}"; do - upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} - done - else - upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} - fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 0222b2aa91e2..d925839fb84a 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -10,7 +10,7 @@ on: type: string package: - description: Package to be built, for now can be polkadot, polkadot-parachain-bin, or polkadot-omni-node + description: Package to be built, for now is either polkadot or polkadot-parachain-bin required: true type: string @@ -19,11 +19,6 @@ on: required: true type: string - target: - description: Target triple for which the artifacts are being built (e.g. 
x86_64-unknown-linux-gnu) - required: true - type: string - secrets: PGP_KMS_KEY: required: true @@ -62,7 +57,6 @@ jobs: run: cat .github/env >> $GITHUB_OUTPUT build-rc: - if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [set-image] runs-on: ubuntu-latest-m environment: release @@ -104,7 +98,7 @@ jobs: ./.github/scripts/release/build-linux-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 with: subject-path: /artifacts/${{ matrix.binaries }}/${{ matrix.binaries }} @@ -133,127 +127,11 @@ jobs: - name: Upload ${{ matrix.binaries }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binaries }}_${{ inputs.target }} + name: ${{ matrix.binaries }} path: /artifacts/${{ matrix.binaries }} - build-macos-rc: - if: ${{ inputs.target == 'aarch64-apple-darwin' }} - runs-on: parity-macos - environment: release - strategy: - matrix: - binaries: ${{ fromJSON(inputs.binary) }} - env: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} - PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - SKIP_WASM_BUILD: 1 - steps: - - name: Checkout sources - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - with: - ref: ${{ inputs.release_tag }} - fetch-depth: 0 - - - name: Set rust version from env file - run: | - RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') - echo $RUST_VERSION - echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV - - name: Set workspace environment variable - # relevant for artifacts upload, which can not interpolate Github Action variable syntax when - # used within 
valid paths. We can not use root-based paths either, since it is set as read-only - # on the `parity-macos` runner. - run: echo "ARTIFACTS_PATH=${GITHUB_WORKSPACE}/artifacts/${{ matrix.binaries }}" >> $GITHUB_ENV - - - name: Set up Homebrew - uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024 - - name: Set homebrew binaries location on path - run: echo "/opt/homebrew/bin" >> $GITHUB_PATH - - - name: Install rust ${{ env.RUST_VERSION }} - uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 - with: - cache: false - toolchain: ${{ env.RUST_VERSION }} - target: wasm32-unknown-unknown - components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std - - - name: cargo info - run: | - echo "######## rustup show ########" - rustup show - echo "######## cargo --version ########" - cargo --version - - - name: Install protobuf - run: brew install protobuf - - name: Install gpg - run: | - brew install gnupg - # Setup for being able to resolve: keyserver.ubuntu.com. - # See: https://github.com/actions/runner-images/issues/9777 - mkdir -p ~/.gnupg/ - touch ~/.gnupg/dirmngr.conf - echo "standard-resolver" > ~/.gnupg/dirmngr.conf - - name: Install sha256sum - run: | - brew install coreutils - - - name: Install pgpkkms - run: | - # Install pgpkms that is used to sign built artifacts - python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" --break-system-packages - - - name: Import gpg keys - shell: bash - run: | - . 
./.github/scripts/common/lib.sh - - import_gpg_keys - - - name: Build binary - run: | - git config --global --add safe.directory "${GITHUB_WORKSPACE}" #avoid "detected dubious ownership" error - ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - - - name: Generate artifact attestation - uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 - with: - subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} - - - name: Sign artifacts - working-directory: ${{ env.ARTIFACTS_PATH }} - run: | - python3 -m pgpkms sign --input ${{matrix.binaries }} -o ${{ matrix.binaries }}.asc - - - name: Check sha256 ${{ matrix.binaries }} - working-directory: ${{ env.ARTIFACTS_PATH }} - shell: bash - run: | - . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh - - echo "Checking binary ${{ matrix.binaries }}" - check_sha256 ${{ matrix.binaries }} - - - name: Check GPG ${{ matrix.binaries }} - working-directory: ${{ env.ARTIFACTS_PATH }} - shell: bash - run: | - . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh - - check_gpg ${{ matrix.binaries }} - - - name: Upload ${{ matrix.binaries }} artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.binaries }}_${{ inputs.target }} - path: ${{ env.ARTIFACTS_PATH }} - build-polkadot-deb-package: - if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} + if: ${{ inputs.package == 'polkadot' }} needs: [build-rc] runs-on: ubuntu-latest @@ -278,170 +156,37 @@ jobs: . 
"${GITHUB_WORKSPACE}"/.github/scripts/release/build-deb.sh ${{ inputs.package }} ${VERSION} - name: Generate artifact attestation - uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 with: subject-path: target/production/*.deb - name: Upload ${{inputs.package }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ inputs.package }}_${{ inputs.target }} + name: ${{ inputs.package }} path: target/production overwrite: true upload-polkadot-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} + if: ${{ inputs.package == 'polkadot' }} needs: [build-polkadot-deb-package] uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: ${{ inputs.package }} release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - upload-polkadot-parachain-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} - needs: [build-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: polkadot-parachain - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - upload-polkadot-omni-node-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'x86_64-unknown-linux-gnu' }} - needs: [build-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: ${{ inputs.package }} - 
release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-frame-omni-bencher-artifacts-to-s3: - if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }} - needs: [build-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: ${{ inputs.package }} - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-chain-spec-builder-artifacts-to-s3: - if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }} + upload-polkadot-parachain-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-parachain-bin' }} needs: [build-rc] uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: chain-spec-builder - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-polkadot-macos-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} - # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries: - # `polkadot`, `polkadot-prepare-worker` and `polkadot-execute-worker`. 
- needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: ${{ inputs.package }} - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-polkadot-prepare-worker-macos-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: polkadot-prepare-worker - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-polkadot-execute-worker-macos-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: polkadot-execute-worker - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-polkadot-omni-node-macos-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: ${{ inputs.package }} - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ 
secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-polkadot-parachain-macos-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: polkadot-parachain release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-frame-omni-bencher-macos-artifacts-to-s3: - if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: ${{ inputs.package }} - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} - secrets: - AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} - AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} - AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} - - upload-chain-spec-builder-macos-artifacts-to-s3: - if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'aarch64-apple-darwin' }} - needs: [build-macos-rc] - uses: ./.github/workflows/release-reusable-s3-upload.yml - with: - package: chain-spec-builder - release_tag: ${{ inputs.release_tag }} - target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index 48c7e53c6c8f..6776b78da8e6 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -9,12 +9,7 @@ on: 
type: string release_tag: - description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM-rcX - required: true - type: string - - target: - description: Target triple for which the artifacts are being uploaded (e.g aarch64-apple-darwin) + description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM-rcX required: true type: string @@ -39,11 +34,11 @@ jobs: - name: Checkout uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - name: Download amd64 artifacts + - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: ${{ inputs.package }}_${{ inputs.target }} - path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} + name: ${{ inputs.package }} + path: artifacts/${{ inputs.package }} - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 @@ -55,4 +50,4 @@ jobs: - name: Upload ${{ inputs.package }} artifacts to s3 run: | . 
./.github/scripts/release/release_lib.sh - upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} ${{ inputs.target }} + upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index fc10496d481b..9a29b46d2fc3 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -1,7 +1,7 @@ name: Srtool build env: - SUBWASM_VERSION: 0.21.0 + SUBWASM_VERSION: 0.20.0 TOML_CLI_VERSION: 0.2.4 on: @@ -11,16 +11,14 @@ on: type: string build_opts: type: string - profile: - type: string outputs: published_runtimes: value: ${{ jobs.find-runtimes.outputs.runtime }} -permissions: - id-token: write - attestations: write - contents: read + schedule: + - cron: "00 02 * * 1" # 2AM weekly on monday + + workflow_dispatch: jobs: find-runtimes: @@ -77,7 +75,6 @@ jobs: with: chain: ${{ matrix.chain }} runtime_dir: ${{ matrix.runtime_dir }} - profile: ${{ inputs.profile }} - name: Summary run: | @@ -86,11 +83,6 @@ jobs: echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" - - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 - with: - subject-path: ${{ steps.srtool_build.outputs.wasm }} - # We now get extra information thanks to subwasm - name: Install subwasm run: | diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index 104e73521331..f991db55b86d 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -8,8 +8,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, - "old_package": "staging-node-cli", 
- "old_bin": "substrate-node", "is_relay": false }, { @@ -21,8 +19,6 @@ "bench_flags": "", "bench_features": "runtime-benchmarks", "uri": "wss://try-runtime-westend.polkadot.io:443", - "old_package": "polkadot", - "old_bin": "polkadot", "is_relay": true }, { @@ -31,11 +27,9 @@ "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", + "uri": "wss://try-runtime-rococo.polkadot.io:443", "bench_features": "runtime-benchmarks", "bench_flags": "", - "uri": "wss://try-runtime-rococo.polkadot.io:443", - "old_package": "polkadot", - "old_bin": "polkadot", "is_relay": true }, { @@ -47,8 +41,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -60,8 +52,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -73,8 +63,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -86,8 +74,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -98,10 +84,7 @@ "template": "cumulus/templates/xcm-bench-template.hbs", "bench_features": "runtime-benchmarks", "bench_flags": "", - "uri": "wss://westend-collectives-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", - "is_relay": false + "uri": "wss://westend-collectives-rpc.polkadot.io:443" }, { "name": "contracts-rococo", @@ -112,8 +95,6 @@ "bench_features": 
"runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -125,8 +106,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -138,8 +117,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-coretime-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -151,8 +128,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none", "uri": null, - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -164,8 +139,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-people-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -177,8 +150,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-people-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false } ] diff --git a/.github/workflows/benchmarks-subsystem.yml b/.github/workflows/subsystem-benchmarks.yml similarity 
index 100% rename from .github/workflows/benchmarks-subsystem.yml rename to .github/workflows/subsystem-benchmarks.yml diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index 61e01cda4428..c5af6bcae77f 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -102,7 +102,7 @@ jobs: merge-multiple: true - run: ls -al reports/ - name: Upload to Codecov - uses: codecov/codecov-action@v5 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 3f8dc4fe1240..24b96219738a 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -34,14 +34,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - id: required run: WASM_BUILD_NO_COLOR=1 forklift cargo test -p staging-node-cli --release --locked -- --ignored - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: @@ -60,14 +53,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - id: required run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet --cargo-quiet - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} test-linux-stable: 
needs: [preflight] @@ -96,7 +82,6 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - id: required run: | # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory '*' @@ -112,12 +97,6 @@ jobs: - name: runtime-api tests if: ${{ matrix.partition == '1/3' }} run: forklift cargo nextest run -p sp-api-test --features enable-staging-api --cargo-quiet - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} # some tests do not run with `try-runtime` feature enabled # https://github.com/paritytech/polkadot-sdk/pull/4251#discussion_r1624282143 @@ -144,7 +123,6 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - id: required run: | forklift cargo nextest run --workspace \ --locked \ @@ -154,12 +132,6 @@ jobs: --features experimental,ci-only-tests \ --filter-expr " !test(/all_security_features_work/) - test(/nonexistent_cache_dir/)" \ --partition count:${{ matrix.partition }} \ - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} - uses: ./.github/actions/workflow-stopper - with: - app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} - app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} confirm-required-jobs-passed: runs-on: ubuntu-latest diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index decd88f2e84c..cca32650b106 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -165,14 +165,12 @@ jobs: - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 - continue-on-error: true with: name: cargo-check-benches-master-${{ github.sha }} path: ./artifacts/master - name: 
Download artifact (current run) uses: actions/download-artifact@v4.1.8 - continue-on-error: true with: name: cargo-check-benches-current-${{ github.sha }} path: ./artifacts/current @@ -185,12 +183,6 @@ jobs: exit 0 fi - # fail if no artifacts - if [ ! -d ./artifacts/master ] || [ ! -d ./artifacts/current ]; then - echo "No artifacts found" - exit 1 - fi - docker run --rm \ -v $PWD/artifacts/master:/artifacts/master \ -v $PWD/artifacts/current:/artifacts/current \ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 42a7e87bda43..f508404f1efa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ workflow: variables: # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/.gitlab/pipeline/zombienet/parachain-template.yml b/.gitlab/pipeline/zombienet/parachain-template.yml index d5c1b6558b39..896ba7913be7 100644 --- a/.gitlab/pipeline/zombienet/parachain-template.yml +++ b/.gitlab/pipeline/zombienet/parachain-template.yml @@ -43,4 +43,4 @@ zombienet-parachain-template-smoke: - ls -ltr $(pwd)/artifacts - cargo test -p template-zombienet-tests --features zombienet --tests minimal_template_block_production_test - cargo test -p template-zombienet-tests --features zombienet --tests parachain_template_block_production_test - - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test + # - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 14a235bcda86..3dab49a118e5 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ 
b/.gitlab/pipeline/zombienet/polkadot.yml @@ -63,8 +63,6 @@ LOCAL_SDK_TEST: "/builds/parity/mirrors/polkadot-sdk/polkadot/zombienet-sdk-tests" FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 RUN_IN_CONTAINER: "1" - # don't retry sdk tests - NEXTEST_RETRIES: 0 artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" when: always @@ -181,7 +179,7 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0001-basic-3cores-6s-blocks.zndsl" -.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: +zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common before_script: @@ -192,7 +190,6 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0002-elastic-scaling-doesnt-break-parachains.zndsl" - .zombienet-polkadot-functional-0012-spam-statement-distribution-requests: extends: - .zombienet-polkadot-common @@ -236,7 +233,7 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0016-approval-voting-parallel.zndsl" -.zombienet-polkadot-functional-0017-sync-backing: +zombienet-polkadot-functional-0017-sync-backing: extends: - .zombienet-polkadot-common script: @@ -255,17 +252,6 @@ zombienet-polkadot-functional-0018-shared-core-idle-parachain: --local-dir="${LOCAL_DIR}/functional" --test="0018-shared-core-idle-parachain.zndsl" -zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness: - extends: - - .zombienet-polkadot-common - before_script: - - !reference [ .zombienet-polkadot-common, before_script ] - - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/functional" - --test="0019-coretime-collation-fetching-fairness.zndsl" - zombienet-polkadot-smoke-0001-parachains-smoke-test: 
extends: - .zombienet-polkadot-common @@ -400,19 +386,3 @@ zombienet-polkadot-malus-0001-dispute-valid: - unset NEXTEST_FAILURE_OUTPUT - unset NEXTEST_SUCCESS_OUTPUT - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- smoke::coretime_revenue::coretime_revenue_test - -zombienet-polkadot-elastic-scaling-slot-based-3cores: - extends: - - .zombienet-polkadot-common - needs: - - job: build-polkadot-zombienet-tests - artifacts: true - before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] - - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}" - script: - # we want to use `--no-capture` in zombienet tests. - - unset NEXTEST_FAILURE_OUTPUT - - unset NEXTEST_SUCCESS_OUTPUT - - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::slot_based_3cores::slot_based_3cores_test diff --git a/Cargo.lock b/Cargo.lock index 747c3c7e74f0..3b4213e18a0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,48 +125,6 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" -[[package]] -name = "alloy-core" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618bd382f0bc2ac26a7e4bfae01c9b015ca8f21b37ca40059ae35a7e62b3dc6" -dependencies = [ - "alloy-dyn-abi", - "alloy-json-abi", - "alloy-primitives 0.8.15", - "alloy-rlp", - "alloy-sol-types 0.8.15", -] - -[[package]] -name = "alloy-dyn-abi" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41056bde53ae10ffbbf11618efbe1e0290859e5eab0fe9ef82ebdb62f12a866f" -dependencies = [ - "alloy-json-abi", - "alloy-primitives 0.8.15", - "alloy-sol-type-parser", - "alloy-sol-types 0.8.15", - "const-hex", - "itoa", - "serde", - "serde_json", - 
"winnow 0.6.18", -] - -[[package]] -name = "alloy-json-abi" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c357da577dfb56998d01f574d81ad7a1958d248740a7981b205d69d65a7da404" -dependencies = [ - "alloy-primitives 0.8.15", - "alloy-sol-type-parser", - "serde", - "serde_json", -] - [[package]] name = "alloy-primitives" version = "0.4.2" @@ -177,41 +135,13 @@ dependencies = [ "bytes", "cfg-if", "const-hex", - "derive_more 0.99.17", - "hex-literal", - "itoa", - "proptest", - "rand", - "ruint", - "serde", - "tiny-keccak", -] - -[[package]] -name = "alloy-primitives" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" -dependencies = [ - "alloy-rlp", - "bytes", - "cfg-if", - "const-hex", - "derive_more 1.0.0", - "foldhash", - "hashbrown 0.15.2", + "derive_more", "hex-literal", - "indexmap 2.7.0", "itoa", - "k256", - "keccak-asm", - "paste", "proptest", "rand", "ruint", - "rustc-hash 2.0.0", "serde", - "sha3 0.10.8", "tiny-keccak", ] @@ -239,7 +169,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", - "syn-solidity 0.4.2", + "syn-solidity", "tiny-keccak", ] @@ -321,6 +251,8 @@ dependencies = [ "alloy-json-abi", "alloy-primitives 0.8.15", "alloy-sol-macro 0.8.18", + "alloy-primitives", + "alloy-sol-macro", "const-hex", "serde", ] @@ -841,14 +773,30 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive 0.4.0", + "asn1-rs-impl 0.1.0", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "asn1-rs" 
version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", + "asn1-rs-derive 0.5.0", + "asn1-rs-impl 0.2.0", "displaydoc", "nom", "num-traits", @@ -857,6 +805,18 @@ dependencies = [ "time", ] +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 1.0.109", + "synstructure 0.12.6", +] + [[package]] name = "asn1-rs-derive" version = "0.5.0" @@ -869,6 +829,17 @@ dependencies = [ "synstructure 0.13.1", ] +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "asn1-rs-impl" version = "0.2.0" @@ -907,15 +878,15 @@ version = "0.0.0" dependencies = [ "asset-hub-rococo-runtime", "bp-bridge-hub-rococo", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "rococo-emulated-chain", "sp-core 28.0.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", - "testnet-parachains-constants 1.0.0", + "sp-keyring", + "staging-xcm", + "testnet-parachains-constants", ] [[package]] @@ -923,113 +894,112 @@ name = "asset-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-test-utils 7.0.0", - "cumulus-pallet-parachain-system 0.7.0", + "asset-test-utils", + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-asset-conversion 10.0.0", - "pallet-assets 
29.1.0", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-treasury 27.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "frame-support", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-treasury", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "rococo-runtime-constants", "rococo-system-emulated-network", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-xcm", + "staging-xcm-executor", + "xcm-runtime-apis", ] [[package]] name = "asset-hub-rococo-runtime" version = "0.11.0" dependencies = [ - "asset-test-utils 7.0.0", - "assets-common 0.7.0", + "asset-test-utils", + "assets-common", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", 
+ "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-asset-conversion 10.0.0", - "pallet-asset-conversion-ops 0.1.0", - "pallet-asset-conversion-tx-payment 10.0.0", - "pallet-assets 29.1.0", - "pallet-assets-freezer 0.1.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-nft-fractionalization 10.0.0", - "pallet-nfts 22.0.0", - "pallet-nfts-runtime-api 14.0.0", - "pallet-proxy 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-uniques 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "pallet-xcm-bridge-hub-router 0.5.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-assets", + "pallet-assets-freezer", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-proxy", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-uniques", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "primitive-types 0.13.1", - "rococo-runtime-constants 7.0.0", + "rococo-runtime-constants", "scale-info", "serde_json", - 
"snowbridge-router-primitives 0.9.0", + "snowbridge-router-primitives", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", "sp-weights 27.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] @@ -1038,14 +1008,14 @@ version = "0.0.0" dependencies = [ "asset-hub-westend-runtime", "bp-bridge-hub-westend", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", - "testnet-parachains-constants 1.0.0", + "sp-keyring", + "staging-xcm", + "testnet-parachains-constants", "westend-emulated-chain", ] @@ -1054,227 +1024,169 @@ name = "asset-hub-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-test-utils 7.0.0", - "assets-common 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", + "asset-test-utils", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-asset-conversion 
10.0.0", - "pallet-asset-tx-payment 28.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-treasury 27.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "sp-core 28.0.0", - "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "pallet-asset-conversion", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-transaction-payment", + "pallet-treasury", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "sp-core 28.0.0", + "sp-runtime 31.0.1", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "westend-system-emulated-network", - "xcm-runtime-apis 0.1.0", + "xcm-runtime-apis", ] [[package]] name = "asset-hub-westend-runtime" version = "0.15.0" dependencies = [ - "asset-test-utils 7.0.0", - "assets-common 0.7.0", + "asset-test-utils", + "assets-common", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + 
"cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-asset-conversion 10.0.0", - "pallet-asset-conversion-ops 0.1.0", - "pallet-asset-conversion-tx-payment 10.0.0", - "pallet-assets 29.1.0", - "pallet-assets-freezer 0.1.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-nft-fractionalization 10.0.0", - "pallet-nfts 22.0.0", - "pallet-nfts-runtime-api 14.0.0", - "pallet-proxy 28.0.0", - "pallet-revive 0.1.0", - "pallet-session 28.0.0", - "pallet-state-trie-migration 29.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-uniques 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "pallet-xcm-bridge-hub-router 0.5.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-assets", + "pallet-assets-freezer", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-proxy", + "pallet-revive", + "pallet-session", + "pallet-state-trie-migration", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-uniques", + 
"pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "primitive-types 0.13.1", "scale-info", "serde_json", - "snowbridge-router-primitives 0.9.0", + "snowbridge-router-primitives", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] name = "asset-test-utils" version = "7.0.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "frame-support", + "frame-system", "hex-literal", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub-router 0.5.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 
7.0.0", + "pallet-assets", + "pallet-balances", + "pallet-collator-selection", + "pallet-session", + "pallet-timestamp", + "pallet-xcm", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parachains-runtimes-test-utils", "parity-scale-codec", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "xcm-runtime-apis 0.1.0", -] - -[[package]] -name = "asset-test-utils" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0324df9ce91a9840632e865dd3272bd20162023856f1b189b7ae58afa5c6b61" -dependencies = [ - "cumulus-pallet-parachain-system 0.17.1", - "cumulus-pallet-xcmp-queue 0.17.0", - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-assets 40.0.0", - "pallet-balances 39.0.0", - "pallet-collator-selection 19.0.0", - "pallet-session 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-xcm 17.0.0", - "pallet-xcm-bridge-hub-router 0.15.1", - "parachains-common 18.0.0", - "parachains-runtimes-test-utils 17.0.0", - "parity-scale-codec", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-parachain-info 0.17.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "substrate-wasm-builder 24.0.1", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", ] [[package]] name = "assets-common" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", + "cumulus-primitives-core", + "frame-support", "impl-trait-for-tuples", "log", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", + "pallet-asset-conversion", + "pallet-assets", + "pallet-xcm", + "parachains-common", "parity-scale-codec", "scale-info", "sp-api 26.0.0", "sp-runtime 31.0.1", - 
"staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", -] - -[[package]] -name = "assets-common" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556e56f9206b129c3f96249cd907b76e8d7ad5265fe368c228c708789a451a3" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-asset-conversion 20.0.0", - "pallet-assets 40.0.0", - "pallet-xcm 17.0.0", - "parachains-common 18.0.0", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "substrate-wasm-builder 24.0.1", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", ] [[package]] @@ -1305,7 +1217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a" dependencies = [ "concurrent-queue", - "event-listener 5.3.1", + "event-listener 5.2.0", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -1396,7 +1308,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.21", + "rustix 0.38.25", "slab", "tracing", "windows-sys 0.52.0", @@ -1417,7 +1329,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.2.0", "event-listener-strategy", "pin-project-lite", ] @@ -1476,9 +1388,9 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.3.1", + "event-listener 5.2.0", "futures-lite 2.3.0", - "rustix 0.38.21", + "rustix 0.38.25", "tracing", ] @@ -1494,7 +1406,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.21", + "rustix 0.38.25", 
"signal-hook-registry", "slab", "windows-sys 0.52.0", @@ -1557,9 +1469,9 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -1579,19 +1491,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "asynchronous-codec" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" -dependencies = [ - "bytes", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite", -] - [[package]] name = "atomic-take" version = "1.1.0" @@ -1742,6 +1641,15 @@ dependencies = [ "serde", ] +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" +dependencies = [ + "serde", +] + [[package]] name = "binary-merkle-tree" version = "13.0.0" @@ -1755,16 +1663,6 @@ dependencies = [ "sp-tracing 16.0.0", ] -[[package]] -name = "binary-merkle-tree" -version = "15.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336bf780dd7526a9a4bc1521720b25c1994dc132cccd59553431923fa4d1a693" -dependencies = [ - "hash-db", - "log", -] - [[package]] name = "bincode" version = "1.3.3" @@ -1813,11 +1711,11 @@ dependencies = [ [[package]] name = "bip39" -version = "2.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes 0.13.0", 
+ "bitcoin_hashes 0.11.0", "serde", "unicode-normalization", ] @@ -1844,10 +1742,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" [[package]] -name = "bitcoin-io" -version = "0.1.3" +name = "bitcoin_hashes" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" [[package]] name = "bitcoin_hashes" @@ -1856,17 +1754,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" dependencies = [ "bitcoin-internals", - "hex-conservative 0.1.1", -] - -[[package]] -name = "bitcoin_hashes" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" -dependencies = [ - "bitcoin-io", - "hex-conservative 0.2.1", + "hex-conservative", ] [[package]] @@ -2024,9 +1912,9 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.2.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d077619e9c237a5d1875166f5e8033e8f6bff0c96f8caf81e1c2d7738c431bf" +checksum = "d32385ecb91a31bddaf908e8dcf4a15aef1bcd3913cc03ebfad02ff6d568abc1" dependencies = [ "log", "parity-scale-codec", @@ -2048,39 +1936,35 @@ dependencies = [ name = "bp-asset-hub-rococo" version = "0.4.0" dependencies = [ - "bp-xcm-bridge-hub-router 0.6.0", - "frame-support 28.0.0", + "bp-xcm-bridge-hub-router", + "frame-support", "parity-scale-codec", "scale-info", - "sp-core 28.0.0", - "staging-xcm 7.0.0", ] [[package]] name = "bp-asset-hub-westend" version = "0.3.0" dependencies = [ - "bp-xcm-bridge-hub-router 0.6.0", - "frame-support 28.0.0", + "bp-xcm-bridge-hub-router", + "frame-support", 
"parity-scale-codec", "scale-info", - "sp-core 28.0.0", - "staging-xcm 7.0.0", ] [[package]] name = "bp-beefy" version = "0.1.0" dependencies = [ - "binary-merkle-tree 13.0.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", - "pallet-beefy-mmr 28.0.0", - "pallet-mmr 27.0.0", + "binary-merkle-tree", + "bp-runtime", + "frame-support", + "pallet-beefy-mmr", + "pallet-mmr", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -2089,12 +1973,12 @@ dependencies = [ name = "bp-bridge-hub-cumulus" version = "0.7.0" dependencies = [ - "bp-messages 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "polkadot-primitives 7.0.0", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "frame-support", + "frame-system", + "polkadot-primitives", "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2104,9 +1988,9 @@ name = "bp-bridge-hub-kusama" version = "0.6.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-messages", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", @@ -2117,9 +2001,9 @@ name = "bp-bridge-hub-polkadot" version = "0.6.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-messages", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", @@ -2130,10 +2014,10 @@ name = "bp-bridge-hub-rococo" version = "0.7.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "frame-support 28.0.0", + "bp-messages", + "bp-runtime", + "bp-xcm-bridge-hub", + "frame-support", "parity-scale-codec", "sp-api 26.0.0", "sp-runtime 31.0.1", @@ -2145,10 +2029,10 @@ name = "bp-bridge-hub-westend" version = "0.3.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages 0.7.0", - 
"bp-runtime 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "frame-support 28.0.0", + "bp-messages", + "bp-runtime", + "bp-xcm-bridge-hub", + "frame-support", "parity-scale-codec", "sp-api 26.0.0", "sp-runtime 31.0.1", @@ -2159,47 +2043,29 @@ dependencies = [ name = "bp-header-chain" version = "0.7.0" dependencies = [ - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", + "bp-runtime", + "bp-test-utils", "finality-grandpa", - "frame-support 28.0.0", + "frame-support", "hex", "hex-literal", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] -[[package]] -name = "bp-header-chain" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "890df97cea17ee61ff982466bb9e90cb6b1462adb45380999019388d05e4b92d" -dependencies = [ - "bp-runtime 0.18.0", - "finality-grandpa", - "frame-support 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-consensus-grandpa 21.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-kusama" version = "0.5.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-polkadot-core", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2208,9 +2074,9 @@ dependencies = [ name = "bp-messages" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-runtime", + "frame-support", "hex", "hex-literal", "parity-scale-codec", @@ -2221,31 +2087,14 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "bp-messages" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7efabf94339950b914ba87249497f1a0e35a73849934d164fecae4b275928cf6" -dependencies = [ - "bp-header-chain 
0.18.1", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-parachains" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-polkadot-core", + "bp-runtime", + "frame-support", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", @@ -2254,60 +2103,28 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "bp-parachains" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9011e5c12c15caf3c4129a98f4f4916ea9165db8daf6ed85867c3106075f40df" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-polkadot-core 0.18.0", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-polkadot" version = "0.5.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-polkadot-core", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-std 14.0.0", ] -[[package]] -name = "bp-polkadot" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6277dd4333917ecfbcc35e9332a9f11682e0a506e76b617c336224660fce33" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-polkadot-core 0.18.0", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "sp-api 34.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-polkadot-bulletin" version = "0.4.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", 
- "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-api 26.0.0", @@ -2319,10 +2136,10 @@ dependencies = [ name = "bp-polkadot-core" version = "0.7.0" dependencies = [ - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-messages", + "bp-runtime", + "frame-support", + "frame-system", "hex", "parity-scale-codec", "scale-info", @@ -2332,71 +2149,33 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "bp-polkadot-core" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345cf472bac11ef79d403e4846a666b7d22a13cd16d9c85b62cd6b5e16c4a042" -dependencies = [ - "bp-messages 0.18.0", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "parity-util-mem", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-relayers" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-runtime", + "frame-support", + "frame-system", "hex", "hex-literal", - "pallet-utility 28.0.0", + "pallet-utility", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", "sp-std 14.0.0", ] -[[package]] -name = "bp-relayers" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9465ad727e466d67d64244a1aa7bb19933a297913fdde34b8e9bda0a341bdeb" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-parachains 0.18.0", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-utility 38.0.0", - "parity-scale-codec", - 
"scale-info", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bp-rococo" version = "0.6.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-polkadot-core", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2405,8 +2184,8 @@ dependencies = [ name = "bp-runtime" version = "0.7.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "hash-db", "hex-literal", "impl-trait-for-tuples", @@ -2421,81 +2200,36 @@ dependencies = [ "sp-state-machine 0.35.0", "sp-std 14.0.0", "sp-trie 29.0.0", - "trie-db", -] - -[[package]] -name = "bp-runtime" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "746d9464f912b278f8a5e2400f10541f95da7fc6c7d688a2788b9a46296146ee" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "hash-db", - "impl-trait-for-tuples", - "log", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-state-machine 0.43.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 37.0.0", - "trie-db", + "trie-db 0.29.1", ] [[package]] name = "bp-test-utils" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", + "bp-header-chain", + "bp-parachains", + "bp-polkadot-core", + "bp-runtime", "ed25519-dalek", "finality-grandpa", "parity-scale-codec", "sp-application-crypto 30.0.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", ] -[[package]] -name = "bp-test-utils" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92e659078b54c0b6bd79896738212a305842ad37168976363233516754337826" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-parachains 0.18.0", - "bp-polkadot-core 0.18.0", - "bp-runtime 0.18.0", - "ed25519-dalek", - "finality-grandpa", - "parity-scale-codec", - "sp-application-crypto 38.0.0", - "sp-consensus-grandpa 21.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 37.0.0", -] - [[package]] name = "bp-westend" version = "0.3.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-header-chain", + "bp-polkadot-core", + "bp-runtime", + "frame-support", "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2504,34 +2238,16 @@ dependencies = [ name = "bp-xcm-bridge-hub" version = "0.2.0" dependencies = [ - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "frame-support 28.0.0", + "bp-messages", + "bp-runtime", + "frame-support", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-std 14.0.0", - "staging-xcm 7.0.0", -] - -[[package]] -name = "bp-xcm-bridge-hub" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6909117ca87cb93703742939d5f0c4c93e9646d9cda22262e9709d68c929999b" -dependencies = [ - "bp-messages 0.18.0", - "bp-runtime 0.18.0", - "frame-support 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] @@ -2542,103 +2258,72 @@ dependencies = [ "scale-info", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", -] - -[[package]] -name = "bp-xcm-bridge-hub-router" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9284820ca704f5c065563cad77d2e3d069a23cc9cb3a29db9c0de8dd3b173a87" -dependencies = [ - "parity-scale-codec", - 
"scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] name = "bridge-hub-common" version = "0.1.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "pallet-message-queue 31.0.0", + "cumulus-primitives-core", + "frame-support", + "pallet-message-queue", "parity-scale-codec", "scale-info", - "snowbridge-core 0.2.0", + "snowbridge-core", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", -] - -[[package]] -name = "bridge-hub-common" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b53c53d627e2da38f8910807944bf3121e154b5c0ac9e122995af9dfb13ed" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "pallet-message-queue 41.0.1", - "parity-scale-codec", - "scale-info", - "snowbridge-core 0.10.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] name = "bridge-hub-rococo-emulated-chain" version = "0.0.0" dependencies = [ - "bp-messages 0.7.0", - "bridge-hub-common 0.1.0", + "bp-messages", + "bridge-hub-common", "bridge-hub-rococo-runtime", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", - "testnet-parachains-constants 1.0.0", + "sp-keyring", + "staging-xcm", + "testnet-parachains-constants", ] [[package]] name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ - "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", - "frame-support 28.0.0", + "frame-support", "hex-literal", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-bridge-messages 0.7.0", - "pallet-message-queue 31.0.0", - 
"pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-message-queue", + "pallet-xcm", + "pallet-xcm-bridge-hub", + "parachains-common", "parity-scale-codec", "rococo-system-emulated-network", "rococo-westend-system-emulated-network", "scale-info", - "snowbridge-core 0.2.0", - "snowbridge-pallet-inbound-queue-fixtures 0.10.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", - "snowbridge-router-primitives 0.9.0", + "snowbridge-core", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-xcm", + "staging-xcm-executor", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] @@ -2650,200 +2335,154 @@ dependencies = [ "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", "bp-polkadot-bulletin", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", + "bp-polkadot-core", + "bp-relayers", "bp-rococo", - "bp-runtime 0.7.0", + "bp-runtime", "bp-westend", - "bp-xcm-bridge-hub-router 0.6.0", - "bridge-hub-common 0.1.0", - "bridge-hub-test-utils 0.7.0", - "bridge-runtime-common 0.7.0", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", 
- "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "bridge-hub-common", + "bridge-hub-test-utils", + "bridge-runtime-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-bridge-relayers 0.7.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + 
"pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", "scale-info", "serde", "serde_json", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-outbound-queue-runtime-api 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-inbound-queue 0.2.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", - "snowbridge-router-primitives 0.9.0", - "snowbridge-runtime-common 0.2.0", - "snowbridge-runtime-test-common 0.2.0", - "snowbridge-system-runtime-api 0.2.0", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-runtime-test-common", + "snowbridge-system-runtime-api", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] name = 
"bridge-hub-test-utils" version = "0.7.0" dependencies = [ - "asset-test-utils 7.0.0", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "bridge-runtime-common 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "asset-test-utils", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", + "bp-test-utils", + "bp-xcm-bridge-hub", + "bridge-runtime-common", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-bridge-relayers 0.7.0", - "pallet-timestamp 27.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-bridge-hub", + "parachains-common", + "parachains-runtimes-test-utils", "parity-scale-codec", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "bridge-hub-test-utils" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0b3aa5fd8481a06ca16e47fd3d2d9c6abe76b27d922ec8980a853f242173b3" -dependencies = [ - "asset-test-utils 18.0.0", - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-parachains 0.18.0", - 
"bp-polkadot-core 0.18.0", - "bp-relayers 0.18.0", - "bp-runtime 0.18.0", - "bp-test-utils 0.18.0", - "bp-xcm-bridge-hub 0.4.0", - "bridge-runtime-common 0.18.0", - "cumulus-pallet-parachain-system 0.17.1", - "cumulus-pallet-xcmp-queue 0.17.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-balances 39.0.0", - "pallet-bridge-grandpa 0.18.0", - "pallet-bridge-messages 0.18.0", - "pallet-bridge-parachains 0.18.0", - "pallet-bridge-relayers 0.18.0", - "pallet-timestamp 37.0.0", - "pallet-utility 38.0.0", - "pallet-xcm 17.0.0", - "pallet-xcm-bridge-hub 0.13.0", - "parachains-common 18.0.0", - "parachains-runtimes-test-utils 17.0.0", - "parity-scale-codec", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-keyring 39.0.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "bridge-hub-westend-emulated-chain" version = "0.0.0" dependencies = [ - "bp-messages 0.7.0", - "bridge-hub-common 0.1.0", + "bp-messages", + "bridge-hub-common", "bridge-hub-westend-runtime", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", - "testnet-parachains-constants 1.0.0", + "sp-keyring", + "staging-xcm", + "testnet-parachains-constants", ] [[package]] @@ -2852,34 +2491,34 @@ version = "1.0.0" dependencies = [ "asset-hub-westend-runtime", "bridge-hub-westend-runtime", - "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", - "frame-support 28.0.0", + "frame-support", "hex-literal", "log", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-bridge-messages 0.7.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", - 
"pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-message-queue", + "pallet-xcm", + "pallet-xcm-bridge-hub", + "parachains-common", "parity-scale-codec", "rococo-westend-system-emulated-network", "scale-info", - "snowbridge-core 0.2.0", - "snowbridge-pallet-inbound-queue 0.2.0", - "snowbridge-pallet-inbound-queue-fixtures 0.10.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", - "snowbridge-router-primitives 0.9.0", + "snowbridge-core", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-xcm", + "staging-xcm-executor", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] @@ -2890,121 +2529,119 @@ dependencies = [ "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", "bp-rococo", - "bp-runtime 0.7.0", + "bp-runtime", "bp-westend", - "bp-xcm-bridge-hub-router 0.6.0", - "bridge-hub-common 0.1.0", - "bridge-hub-test-utils 0.7.0", - "bridge-runtime-common 0.7.0", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - 
"frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "bridge-hub-common", + "bridge-hub-test-utils", + "bridge-runtime-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-bridge-relayers 0.7.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + 
"pallet-xcm-bridge-hub", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "serde", "serde_json", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-outbound-queue-runtime-api 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-inbound-queue 0.2.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", - "snowbridge-router-primitives 0.9.0", - "snowbridge-runtime-common 0.2.0", - "snowbridge-runtime-test-common 0.2.0", - "snowbridge-system-runtime-api 0.2.0", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-runtime-test-common", + "snowbridge-system-runtime-api", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] name = 
"bridge-runtime-common" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", + "bp-test-utils", + "bp-xcm-bridge-hub", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-bridge-relayers 0.7.0", - "pallet-transaction-payment 28.0.0", - "pallet-utility 28.0.0", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -3013,43 +2650,11 @@ dependencies = [ "sp-std 14.0.0", "sp-trie 29.0.0", "sp-weights 27.0.0", - "staging-xcm 7.0.0", + "staging-xcm", "static_assertions", "tuplex", ] -[[package]] -name = "bridge-runtime-common" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c639aa22de6e904156a3e8b0e6b9e6af790cb27a1299688cc07997e1ffe5b648" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-parachains 0.18.0", - "bp-polkadot-core 0.18.0", - "bp-relayers 0.18.0", - "bp-runtime 0.18.0", - "bp-xcm-bridge-hub 0.4.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-bridge-grandpa 0.18.0", - "pallet-bridge-messages 0.18.0", - "pallet-bridge-parachains 0.18.0", - "pallet-bridge-relayers 0.18.0", - "pallet-transaction-payment 38.0.0", - "pallet-utility 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 
37.0.0", - "staging-xcm 14.2.0", - "tuplex", -] - [[package]] name = "bs58" version = "0.5.1" @@ -3115,9 +2720,6 @@ name = "bytes" version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" -dependencies = [ - "serde", -] [[package]] name = "bzip2-sys" @@ -3294,27 +2896,26 @@ dependencies = [ name = "chain-spec-guide-runtime" version = "0.0.0" dependencies = [ - "cmd_lib", "docify", - "frame-support 28.0.0", - "pallet-balances 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", + "frame-support", + "pallet-balances", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "polkadot-sdk-frame", "sc-chain-spec", "scale-info", "serde", "serde_json", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-keyring 31.0.0", + "sp-genesis-builder", + "sp-keyring", "sp-runtime 31.0.1", "staging-chain-spec-builder", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -3602,12 +3203,12 @@ name = "collectives-westend-emulated-chain" version = "0.0.0" dependencies = [ "collectives-westend-runtime", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "testnet-parachains-constants 1.0.0", + "testnet-parachains-constants", ] [[package]] @@ -3615,26 +3216,26 @@ name = "collectives-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", 
"emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-asset-rate 7.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-treasury 27.0.0", - "pallet-utility 28.0.0", - "pallet-whitelist 27.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", + "frame-support", + "pallet-asset-rate", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-treasury", + "pallet-utility", + "pallet-whitelist", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "sp-runtime 31.0.1", + "staging-xcm", + "staging-xcm-executor", + "testnet-parachains-constants", + "westend-runtime-constants", "westend-system-emulated-network", ] @@ -3642,80 +3243,79 @@ dependencies = [ name = "collectives-westend-runtime" version = "3.0.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + 
"frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-alliance 27.0.0", - "pallet-asset-rate 7.0.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-collective 28.0.0", - "pallet-collective-content 0.6.0", - "pallet-core-fellowship 12.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-preimage 28.0.0", - "pallet-proxy 28.0.0", - "pallet-ranked-collective 28.0.0", - "pallet-referenda 28.0.0", - "pallet-salary 13.0.0", - "pallet-scheduler 29.0.0", - "pallet-session 28.0.0", - "pallet-state-trie-migration 29.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-treasury 27.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-alliance", + "pallet-asset-rate", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-core-fellowship", + "pallet-message-queue", + "pallet-multisig", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-referenda", + "pallet-salary", + "pallet-scheduler", + "pallet-session", + "pallet-state-trie-migration", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "serde_json", "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 
28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] @@ -3850,9 +3450,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -3882,9 +3482,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" dependencies = [ "cfg-if", "cpufeatures", @@ -3949,64 +3549,64 @@ checksum = "f272d0c4cf831b4fa80ee529c7707f76585986e910e1fbce1d7921970bc1a241" name = "contracts-rococo-runtime" version = "0.8.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - 
"cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-contracts 27.0.0", - "pallet-insecure-randomness-collective-flip 16.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-session 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-contracts", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-multisig", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + 
"polkadot-runtime-common", + "rococo-runtime-constants", "scale-info", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] @@ -4060,100 +3660,99 @@ name = "coretime-rococo-emulated-chain" version = "0.1.0" dependencies = [ "coretime-rococo-runtime", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "testnet-parachains-constants 1.0.0", + "testnet-parachains-constants", ] [[package]] name = "coretime-rococo-integration-tests" version = "0.0.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "rococo-runtime-constants 7.0.0", + "frame-support", + "pallet-balances", + "pallet-broker", + "pallet-identity", + "pallet-message-queue", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "rococo-runtime-constants", 
"rococo-system-emulated-network", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "coretime-rococo-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-proxy 28.0.0", - "pallet-session 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - 
"polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-broker", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", "scale-info", "serde", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] @@ -4161,31 +3760,31 @@ name = "coretime-westend-emulated-chain" version = "0.1.0" dependencies = [ "coretime-westend-runtime", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "sp-core 28.0.0", - "testnet-parachains-constants 1.0.0", + "testnet-parachains-constants", ] [[package]] name = "coretime-westend-integration-tests" version = "0.0.0" dependencies = [ - 
"cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "westend-runtime-constants 7.0.0", + "frame-support", + "pallet-balances", + "pallet-broker", + "pallet-identity", + "pallet-message-queue", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "sp-runtime 31.0.1", + "staging-xcm", + "staging-xcm-executor", + "westend-runtime-constants", "westend-system-emulated-network", ] @@ -4193,67 +3792,66 @@ dependencies = [ name = "coretime-westend-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - 
"pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-multisig 28.0.0", - "pallet-proxy 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-broker", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "serde", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + 
"westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] @@ -4464,18 +4062,22 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.11" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ + "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] [[package]] name = "crunchy" @@ -4526,21 +4128,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "crypto_secretbox" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" -dependencies = [ - "aead", - "cipher 0.4.4", - "generic-array 0.14.7", - "poly1305", - "salsa20", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "ctr" version = "0.9.2" @@ -4573,9 +4160,9 @@ dependencies = [ "async-trait", "cumulus-client-consensus-common", "cumulus-client-network", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-test-client", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "futures", "parity-scale-codec", @@ -4584,7 +4171,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sp-api 26.0.0", "sp-consensus", @@ -4605,8 +4192,8 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", - 
"cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-aura", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", "parity-scale-codec", @@ -4615,7 +4202,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-consensus-aura", @@ -4626,17 +4213,16 @@ dependencies = [ "schnellru", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-aura 0.32.0", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp 26.0.0", - "sp-trie 29.0.0", + "sp-timestamp", "substrate-prometheus-endpoint", "tokio", "tracing", @@ -4648,26 +4234,26 @@ version = "0.7.0" dependencies = [ "async-trait", "cumulus-client-pov-recovery", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-test-client", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "dyn-clone", "futures", "futures-timer", "log", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-consensus-babe", "schnellru", "sp-blockchain", "sp-consensus", - "sp-consensus-slots 0.32.0", + "sp-consensus-slots", "sp-core 28.0.0", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "sp-trie 29.0.0", "sp-version 29.0.0", @@ -4681,9 +4267,9 @@ version = "0.7.0" dependencies = [ "anyhow", "async-trait", - "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-primitives-parachain-inherent", "sp-consensus", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "thiserror", @@ -4695,17 +4281,17 @@ version = "0.7.0" dependencies = [ 
"async-trait", "cumulus-client-consensus-common", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", "parking_lot 0.12.3", "sc-consensus", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "tracing", @@ -4716,7 +4302,7 @@ name = "cumulus-client-network" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-test-service", @@ -4726,8 +4312,8 @@ dependencies = [ "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-test-client", "portpicker", "rstest", @@ -4737,7 +4323,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", @@ -4753,15 +4339,15 @@ name = "cumulus-client-parachain-inherent" version = "0.1.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "parity-scale-codec", "sc-client-api", "sp-api 26.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-storage 19.0.0", @@ -4775,7 +4361,7 @@ version = "0.7.0" dependencies = [ "assert_matches", "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-test-client", 
"cumulus-test-service", @@ -4785,7 +4371,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "portpicker", "rand", "rstest", @@ -4814,14 +4400,13 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-client-pov-recovery", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "futures", - "futures-timer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-network", @@ -4839,51 +4424,33 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", ] [[package]] name = "cumulus-pallet-aura-ext" version = "0.7.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-aura 27.0.0", - "pallet-timestamp 27.0.0", + "cumulus-pallet-parachain-system", + "frame-support", + "frame-system", + "pallet-aura", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-aura 0.32.0", + "sp-consensus-aura", "sp-runtime 31.0.1", ] -[[package]] -name = "cumulus-pallet-aura-ext" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cbe2735fc7cf2b6521eab00cb1a1ab025abc1575cc36887b36dc8c5cb1c9434" -dependencies = [ - "cumulus-pallet-parachain-system 0.17.1", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-aura 37.0.0", - "pallet-timestamp 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-consensus-aura 0.40.0", - "sp-runtime 39.0.2", -] - [[package]] name = "cumulus-pallet-dmp-queue" version = "0.7.0" dependencies = 
[ - "cumulus-primitives-core 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -4891,25 +4458,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", -] - -[[package]] -name = "cumulus-pallet-dmp-queue" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97263a8e758d201ebe81db7cea7b278b4fb869c11442f77acef70138ac1a252f" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] @@ -4918,99 +4467,51 @@ version = "0.7.0" dependencies = [ "assert_matches", "bytes", - "cumulus-pallet-parachain-system-proc-macro 0.6.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-pallet-parachain-system-proc-macro", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "environmental", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "futures", "hex-literal", "impl-trait-for-tuples", "log", - "pallet-message-queue 31.0.0", + "pallet-message-queue", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", "rand", "sc-client-api", "scale-info", - "sp-consensus-slots 0.32.0", + 
"sp-consensus-slots", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", "sp-version 29.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "trie-db", + "staging-xcm", + "staging-xcm-builder", + "trie-db 0.29.1", "trie-standardmap", ] -[[package]] -name = "cumulus-pallet-parachain-system" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "546403ee1185f4051a74cc9c9d76e82c63cac3fb68e1bf29f61efb5604c96488" -dependencies = [ - "bytes", - "cumulus-pallet-parachain-system-proc-macro 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cumulus-primitives-core 0.16.0", - "cumulus-primitives-parachain-inherent 0.16.0", - "cumulus-primitives-proof-size-hostfunction 0.10.0", - "environmental", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-message-queue 41.0.1", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "polkadot-runtime-common 17.0.0", - "polkadot-runtime-parachains 17.0.1", - "scale-info", - "sp-core 34.0.0", - "sp-externalities 0.29.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-state-machine 0.43.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 37.0.0", - "sp-version 37.0.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "trie-db", -] - -[[package]] -name = "cumulus-pallet-parachain-system-proc-macro" -version = "0.6.0" -dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"befbaf3a1ce23ac8476481484fef5f4d500cbd15b4dad6380ce1d28134b0c1f7" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", @@ -5022,86 +4523,40 @@ dependencies = [ name = "cumulus-pallet-session-benchmarking" version = "9.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-session 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-session", "parity-scale-codec", "sp-runtime 31.0.1", ] -[[package]] -name = "cumulus-pallet-session-benchmarking" -version = "19.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18168570689417abfb514ac8812fca7e6429764d01942750e395d7d8ce0716ef" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "sp-runtime 39.0.2", -] - [[package]] name = "cumulus-pallet-solo-to-para" version = "0.7.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-sudo 28.0.0", + "cumulus-pallet-parachain-system", + "frame-support", + "frame-system", + "pallet-sudo", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "scale-info", "sp-runtime 31.0.1", ] -[[package]] -name = "cumulus-pallet-solo-to-para" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42c74548c8cab75da6f2479a953f044b582cfce98479862344a24df7bbd215" -dependencies = [ - "cumulus-pallet-parachain-system 0.17.1", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-sudo 38.0.0", - "parity-scale-codec", - "polkadot-primitives 16.0.0", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "cumulus-pallet-xcm" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-primitives-core", + "frame-support", + "frame-system", 
"parity-scale-codec", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", -] - -[[package]] -name = "cumulus-pallet-xcm" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e49231f6cd8274438b078305dc8ce44c54c0d3f4a28e902589bcbaa53d954608" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] @@ -5109,81 +4564,39 @@ name = "cumulus-pallet-xcmp-queue" version = "0.7.0" dependencies = [ "bounded-collections", - "bp-xcm-bridge-hub-router 0.6.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-primitives-core 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-xcm-bridge-hub-router", + "cumulus-pallet-parachain-system", + "cumulus-primitives-core", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", + "pallet-balances", + "pallet-message-queue", "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-runtime-common", + "polkadot-runtime-parachains", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "cumulus-pallet-xcmp-queue" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f788bdac9474795ea13ba791b55798fb664b2e3da8c3a7385b480c9af4e6539" -dependencies = [ - "bounded-collections", - "bp-xcm-bridge-hub-router 0.14.1", - "cumulus-primitives-core 0.16.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-message-queue 41.0.1", - "parity-scale-codec", - "polkadot-runtime-common 17.0.0", - "polkadot-runtime-parachains 
17.0.1", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "cumulus-ping" version = "0.7.0" dependencies = [ - "cumulus-pallet-xcm 0.7.0", - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", -] - -[[package]] -name = "cumulus-ping" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47128f797359951723e2d106a80e592d007bb7446c299958cdbafb1489ddbf0" -dependencies = [ - "cumulus-pallet-xcm 0.17.0", - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] @@ -5194,8 +4607,8 @@ dependencies = [ "clap 4.5.13", "parity-scale-codec", "polkadot-node-primitives", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "sc-executor 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", @@ -5209,21 +4622,7 @@ name = "cumulus-primitives-aura" version = "0.7.0" dependencies = [ "sp-api 26.0.0", - "sp-consensus-aura 0.32.0", -] - -[[package]] -name = "cumulus-primitives-aura" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e7825bcf3cc6c962a5b9b9f47e02dc381109e521d0bc00cad785c65da18471" -dependencies = [ - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "polkadot-primitives 15.0.0", - "sp-api 34.0.0", - "sp-consensus-aura 0.40.0", - "sp-runtime 39.0.2", + "sp-consensus-aura", ] [[package]] @@ -5231,31 +4630,14 @@ name = "cumulus-primitives-core" version = 
"0.7.0" dependencies = [ "parity-scale-codec", - "polkadot-core-primitives 7.0.0", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-primitives", "scale-info", "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-trie 29.0.0", - "staging-xcm 7.0.0", -] - -[[package]] -name = "cumulus-primitives-core" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6b5221a4a3097f2ebef66c84c1e6d7a0b8ec7e63f2bd5ae04c1e6d3fc7514e" -dependencies = [ - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "scale-info", - "sp-api 34.0.0", - "sp-runtime 39.0.2", - "sp-trie 37.0.0", - "staging-xcm 14.2.0", + "staging-xcm", ] [[package]] @@ -5263,29 +4645,14 @@ name = "cumulus-primitives-parachain-inherent" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-trie 29.0.0", ] -[[package]] -name = "cumulus-primitives-parachain-inherent" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "842a694901e04a62d88995418dec35c22f7dba2b34d32d2b8de37d6b92f973ff" -dependencies = [ - "async-trait", - "cumulus-primitives-core 0.16.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-trie 37.0.0", -] - [[package]] name = "cumulus-primitives-proof-size-hostfunction" version = "0.2.0" @@ -5298,28 +4665,17 @@ dependencies = [ "sp-trie 29.0.0", ] -[[package]] -name = "cumulus-primitives-proof-size-hostfunction" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "421f03af054aac7c89e87a49e47964886e53a8d7395990eab27b6f201d42524f" -dependencies = [ - "sp-externalities 0.29.0", - "sp-runtime-interface 
28.0.0", - "sp-trie 37.0.0", -] - [[package]] name = "cumulus-primitives-storage-weight-reclaim" version = "1.0.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -5328,75 +4684,29 @@ dependencies = [ "sp-trie 29.0.0", ] -[[package]] -name = "cumulus-primitives-storage-weight-reclaim" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fc49dfec0ba3438afad73787736cc0dba88d15b5855881f12a4d8b812a72927" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "cumulus-primitives-proof-size-hostfunction 0.10.0", - "docify", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "cumulus-primitives-timestamp" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "sp-inherents 26.0.0", - "sp-timestamp 26.0.0", -] - -[[package]] -name = "cumulus-primitives-timestamp" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33cffb8f010f39ac36b31d38994b8f9d9256d9b5e495d96b4ec59d3e30852d53" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "sp-inherents 34.0.0", - "sp-timestamp 34.0.0", + "cumulus-primitives-core", + "sp-inherents", + "sp-timestamp", ] [[package]] name = "cumulus-primitives-utility" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", + "cumulus-primitives-core", + "frame-support", "log", - "pallet-asset-conversion 10.0.0", + "pallet-asset-conversion", "parity-scale-codec", - "polkadot-runtime-common 7.0.0", + "polkadot-runtime-common", "sp-runtime 31.0.1", - 
"staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "cumulus-primitives-utility" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bdcf4d46dd93f1e6d5dd6d379133566a44042ba6476d04bdcbdb4981c622ae4" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "log", - "pallet-asset-conversion 20.0.0", - "parity-scale-codec", - "polkadot-runtime-common 17.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] @@ -5404,13 +4714,13 @@ name = "cumulus-relay-chain-inprocess-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-test-service", "futures", "futures-timer", "polkadot-cli", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-service", "polkadot-test-client", "prioritized-metered-channel", @@ -5422,7 +4732,7 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", ] @@ -5432,9 +4742,9 @@ name = "cumulus-relay-chain-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "futures", - "jsonrpsee-core", + "jsonrpsee-core 0.24.3", "parity-scale-codec", "polkadot-overseer", "sc-client-api", @@ -5451,16 +4761,16 @@ version = "0.7.0" dependencies = [ "array-bytes", "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "polkadot-network-bridge", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", "polkadot-overseer", - 
"polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-service", "sc-authority-discovery", "sc-client-api", @@ -5472,7 +4782,7 @@ dependencies = [ "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "tokio", @@ -5484,12 +4794,12 @@ name = "cumulus-relay-chain-rpc-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "cumulus-relay-chain-interface", "either", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.24.3", "parity-scale-codec", "pin-project", "polkadot-overseer", @@ -5505,8 +4815,8 @@ dependencies = [ "smoldot 0.11.0", "smoldot-light 0.9.0", "sp-api 26.0.0", - "sp-authority-discovery 26.0.0", - "sp-consensus-babe 0.32.0", + "sp-authority-discovery", + "sp-consensus-babe", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", @@ -5524,19 +4834,19 @@ dependencies = [ name = "cumulus-test-client" version = "0.1.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-transaction-payment 28.0.0", + "frame-system", + "pallet-balances", + "pallet-transaction-payment", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "sc-block-builder", "sc-consensus", "sc-consensus-aura", @@ -5546,14 +4856,14 @@ dependencies = [ "sp-api 26.0.0", "sp-application-crypto 
30.0.0", "sp-blockchain", - "sp-consensus-aura 0.32.0", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "substrate-test-client", ] @@ -5561,69 +4871,55 @@ dependencies = [ name = "cumulus-test-relay-sproof-builder" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", ] -[[package]] -name = "cumulus-test-relay-sproof-builder" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e570e41c3f05a8143ebff967bbb0c7dcaaa6f0bebd8639b9418b8005b13eda03" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "parity-scale-codec", - "polkadot-primitives 16.0.0", - "sp-runtime 39.0.2", - "sp-state-machine 0.43.0", - "sp-trie 37.0.0", -] - [[package]] name = "cumulus-test-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-glutton 14.0.0", - "pallet-message-queue 31.0.0", - "pallet-session 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "frame-executive", + "frame-support", + "frame-system", + 
"frame-system-rpc-runtime-api", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-glutton", + "pallet-message-queue", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", "parity-scale-codec", "scale-info", "serde_json", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-transaction-pool 26.0.0", + "sp-session", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "substrate-wasm-builder 17.0.0", + "staging-parachain-info", + "substrate-wasm-builder", ] [[package]] @@ -5642,27 +4938,27 @@ dependencies = [ "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-pallet-parachain-system", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-test-client", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", + "frame-system", + "frame-system-rpc-runtime-api", "futures", - "jsonrpsee", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "parachains-common 7.0.0", + "jsonrpsee 0.24.3", + "pallet-timestamp", + "pallet-transaction-payment", + "parachains-common", "parity-scale-codec", "polkadot-cli", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives 7.0.0", 
+ "polkadot-primitives", "polkadot-service", "polkadot-test-service", "portpicker", @@ -5688,17 +4984,17 @@ dependencies = [ "serde_json", "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-blockchain", "sp-consensus", - "sp-consensus-aura 0.32.0", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", + "sp-genesis-builder", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "substrate-test-client", "substrate-test-utils", @@ -5836,14 +5132,38 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + [[package]] name = "darling" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.10", + "darling_macro 0.20.10", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.86", + "quote 1.0.37", + "strsim 0.10.0", + "syn 1.0.109", ] [[package]] @@ -5860,13 +5180,24 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.20.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core", + "darling_core 0.20.10", "quote 1.0.37", "syn 2.0.87", ] @@ -5886,9 +5217,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" @@ -5929,13 +5260,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "der-parser" version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs", + "asn1-rs 0.6.1", "displaydoc", "nom", "num-bigint", @@ -6009,27 +5354,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", - "unicode-xid 0.2.4", -] - [[package]] name = "diff" version = "0.1.13" @@ -6091,15 +5415,6 @@ dependencies = [ "dirs-sys-next", ] -[[package]] -name = "dirs" 
-version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs-sys" version = "0.4.1" @@ -6357,34 +5672,34 @@ dependencies = [ name = "emulated-integration-tests-common" version = "3.0.0" dependencies = [ - "asset-test-utils 7.0.0", - "bp-messages 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "bridge-runtime-common 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-bridge-messages 0.7.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "parachains-common 7.0.0", + "asset-test-utils", + "bp-messages", + "bp-xcm-bridge-hub", + "bridge-runtime-common", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "frame-support", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-message-queue", + "pallet-xcm", + "pallet-xcm-bridge-hub", + "parachains-common", "parity-scale-codec", "paste", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", "sc-consensus-grandpa", - "sp-authority-discovery 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", + "staging-xcm", "xcm-emulator", ] @@ -6403,6 +5718,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-as-inner" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "enum-as-inner" version = "0.6.0" @@ -6510,9 +5837,9 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-header-chain 0.7.0", + "bp-header-chain", "finality-relay", - "frame-support 28.0.0", + "frame-support", "futures", "log", "num-traits", @@ -6535,7 +5862,7 @@ dependencies = [ "honggfuzz", "polkadot-erasure-coding", "polkadot-node-primitives", - "polkadot-primitives 7.0.0", + "polkadot-primitives", ] [[package]] @@ -6559,55 +5886,13 @@ dependencies = [ "libc", ] -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types 0.14.1", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3 0.10.8", - "thiserror", - "uint 0.9.5", -] - -[[package]] -name = "ethabi-decode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d398648d65820a727d6a81e58b962f874473396a047e4c30bafe3240953417" -dependencies = [ - "ethereum-types 0.14.1", - "tiny-keccak", -] - [[package]] name = "ethabi-decode" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52029c4087f9f01108f851d0d02df9c21feb5660a19713466724b7f95bd2d773" -dependencies = [ - "ethereum-types 0.15.1", - "tiny-keccak", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +checksum = "f9af52ec57c5147716872863c2567c886e7d62f539465b94352dbc0108fe5293" dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec 0.6.0", - "impl-rlp 0.3.0", - "impl-serde 0.4.0", - "scale-info", + 
"ethereum-types", "tiny-keccak", ] @@ -6620,38 +5905,22 @@ dependencies = [ "crunchy", "fixed-hash", "impl-codec 0.7.0", - "impl-rlp 0.4.0", + "impl-rlp", "impl-serde 0.5.0", "scale-info", "tiny-keccak", ] -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom 0.13.0", - "fixed-hash", - "impl-codec 0.6.0", - "impl-rlp 0.3.0", - "impl-serde 0.4.0", - "primitive-types 0.12.2", - "scale-info", - "uint 0.9.5", -] - [[package]] name = "ethereum-types" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ab15ed80916029f878e0267c3a9f92b67df55e79af370bf66199059ae2b4ee3" dependencies = [ - "ethbloom 0.14.1", + "ethbloom", "fixed-hash", "impl-codec 0.7.0", - "impl-rlp 0.4.0", + "impl-rlp", "impl-serde 0.5.0", "primitive-types 0.13.1", "scale-info", @@ -6666,9 +5935,19 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" dependencies = [ "concurrent-queue", "parking", @@ -6681,7 +5960,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.2.0", "pin-project-lite", ] @@ -6785,7 +6064,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eb42427514b063d97ce21d5199f36c0c307d981434a6be32582bc79fe5bd2303" dependencies = [ "expander", - "indexmap 2.7.0", + "indexmap 2.2.3", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", @@ -6903,7 +6182,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "bp-header-chain 0.7.0", + "bp-header-chain", "futures", "log", "num-traits", @@ -7044,55 +6323,27 @@ name = "frame-benchmarking" version = "28.0.0" dependencies = [ "array-bytes", - "frame-support 28.0.0", - "frame-support-procedural 23.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-support-procedural", + "frame-system", "linregress", "log", "parity-scale-codec", "paste", "rusty-fork", - "sc-client-db", "scale-info", "serde", "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-externalities 0.25.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", - "sp-state-machine 0.35.0", "sp-storage 19.0.0", "static_assertions", ] -[[package]] -name = "frame-benchmarking" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01bdd47c2d541b38bd892da647d1e972c9d85b4ecd7094ad64f7600175da54d" -dependencies = [ - "frame-support 38.0.0", - "frame-support-procedural 30.0.4", - "frame-system 38.0.0", - "linregress", - "log", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-runtime-interface 28.0.0", - "sp-storage 21.0.0", - "static_assertions", -] - [[package]] name = "frame-benchmarking-cli" version = "32.0.0" @@ -7103,11 +6354,11 @@ dependencies = [ "clap 4.5.13", "comfy-table", "cumulus-client-parachain-inherent", - "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "frame-benchmarking 28.0.0", - "frame-support 
28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "gethostname", "handlebars", "hex", @@ -7115,8 +6366,8 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "rand", "rand_pcg", "sc-block-builder", @@ -7126,27 +6377,26 @@ dependencies = [ "sc-client-db", "sc-executor 0.32.0", "sc-executor-common 0.29.0", - "sc-runtime-utilities", "sc-service", "sc-sysinfo", "serde", "serde_json", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-database", "sp-externalities 0.25.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-storage 19.0.0", - "sp-timestamp 26.0.0", - "sp-transaction-pool 26.0.0", + "sp-timestamp", + "sp-transaction-pool", "sp-trie 29.0.0", "sp-version 29.0.0", "sp-wasm-interface 20.0.0", @@ -7162,9 +6412,9 @@ dependencies = [ name = "frame-benchmarking-pallet-pov" version = "18.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-io 30.0.0", @@ -7172,110 +6422,52 @@ dependencies = [ ] [[package]] -name = "frame-benchmarking-pallet-pov" -version = "28.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffde6f573a63eeb1ccb7d2667c5741a11ce93bc30f33712e5326b9d8a811c29" +name = "frame-election-provider-solution-type" +version = "13.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "frame-election-provider-support", + "frame-support", "parity-scale-codec", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 
1.0.37", "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", + "sp-arithmetic 23.0.0", + "syn 2.0.87", + "trybuild", ] [[package]] -name = "frame-decode" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d3379df61ff3dd871e2dde7d1bcdc0263e613c21c7579b149fd4f0ad9b1dc2" +name = "frame-election-provider-support" +version = "28.0.0" dependencies = [ - "frame-metadata 17.0.0", + "frame-election-provider-solution-type", + "frame-support", + "frame-system", "parity-scale-codec", - "scale-decode 0.14.0", - "scale-info", - "scale-type-resolver", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "frame-election-provider-solution-type" -version = "13.0.0" -dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "parity-scale-codec", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "scale-info", - "sp-arithmetic 23.0.0", - "syn 2.0.87", - "trybuild", -] - -[[package]] -name = "frame-election-provider-solution-type" -version = "14.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8156f209055d352994ecd49e19658c6b469d7c6de923bd79868957d0dcfb6f71" -dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - -[[package]] -name = "frame-election-provider-support" -version = "28.0.0" -dependencies = [ - "frame-election-provider-solution-type 13.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "parity-scale-codec", - "rand", + "rand", "scale-info", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", ] -[[package]] -name = "frame-election-provider-support" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36f5116192c63d39f1b4556fa30ac7db5a6a52575fa241b045f7dfa82ecc2be" -dependencies = [ - 
"frame-election-provider-solution-type 14.0.1", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-npos-elections 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ "clap 4.5.13", - "frame-election-provider-solution-type 13.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", + "frame-election-provider-solution-type", + "frame-election-provider-support", + "frame-support", "honggfuzz", "parity-scale-codec", "rand", "scale-info", "sp-arithmetic 23.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", ] @@ -7285,70 +6477,38 @@ version = "28.0.0" dependencies = [ "aquamarine", "array-bytes", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-try-runtime 0.34.0", + "frame-support", + "frame-system", + "frame-try-runtime", "log", - "pallet-balances 28.0.0", - "pallet-transaction-payment 28.0.0", + "pallet-balances", + "pallet-transaction-payment", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "sp-version 29.0.0", ] -[[package]] -name = "frame-executive" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c365bf3879de25bbee28e9584096955a02fbe8d7e7624e10675800317f1cee5b" -dependencies = [ - "aquamarine", - "frame-support 38.0.0", - "frame-system 38.0.0", - "frame-try-runtime 0.44.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", -] - -[[package]] -name = "frame-metadata" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" -dependencies = [ - "cfg-if", - "parity-scale-codec", - "scale-info", 
- "serde", -] - [[package]] name = "frame-metadata" -version = "17.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "701bac17e9b55e0f95067c428ebcb46496587f08e8cf4ccc0fe5903bea10dbb8" +checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" dependencies = [ "cfg-if", "parity-scale-codec", "scale-info", - "serde", ] [[package]] name = "frame-metadata" -version = "18.0.0" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8" +checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" dependencies = [ "cfg-if", "parity-scale-codec", @@ -7363,9 +6523,9 @@ dependencies = [ "array-bytes", "const-hex", "docify", - "frame-metadata 18.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-metadata 16.0.0", + "frame-support", + "frame-system", "log", "merkleized-metadata", "parity-scale-codec", @@ -7373,25 +6533,9 @@ dependencies = [ "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "substrate-test-runtime-client", - "substrate-wasm-builder 17.0.0", -] - -[[package]] -name = "frame-metadata-hash-extension" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ac71dbd97039c49fdd69f416a4dd5d8da3652fdcafc3738b45772ad79eb4ec" -dependencies = [ - "array-bytes", - "docify", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", + "substrate-wasm-builder", ] [[package]] @@ -7400,15 +6544,15 @@ version = "0.1.0" dependencies = [ "assert_cmd", "clap 4.5.13", - "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", "frame-benchmarking-cli", "log", "sc-chain-spec", "sc-cli", - "sp-genesis-builder 0.8.0", + "sp-genesis-builder", 
"sp-runtime 31.0.1", - "sp-statement-store 10.0.0", + "sp-statement-store", "sp-tracing 16.0.0", "tempfile", "tracing-subscriber 0.3.18", @@ -7420,7 +6564,7 @@ version = "0.35.0" dependencies = [ "futures", "indicatif", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "serde", @@ -7444,13 +6588,13 @@ dependencies = [ "aquamarine", "array-bytes", "assert_matches", - "binary-merkle-tree 13.0.0", + "binary-merkle-tree", "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 18.0.0", - "frame-support-procedural 23.0.0", - "frame-system 28.0.0", + "frame-metadata 16.0.0", + "frame-support-procedural", + "frame-system", "impl-trait-for-tuples", "k256", "log", @@ -7468,15 +6612,15 @@ dependencies = [ "sp-crypto-hashing 0.1.0", "sp-crypto-hashing-proc-macro 0.1.0", "sp-debug-derive 14.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-state-machine 0.35.0", "sp-std 14.0.0", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "sp-trie 29.0.0", "sp-weights 27.0.0", @@ -7484,48 +6628,6 @@ dependencies = [ "tt-call", ] -[[package]] -name = "frame-support" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e44af69fa61bc5005ffe0339e198957e77f0f255704a9bee720da18a733e3dc" -dependencies = [ - "aquamarine", - "array-bytes", - "bitflags 1.3.2", - "docify", - "environmental", - "frame-metadata 16.0.0", - "frame-support-procedural 30.0.4", - "impl-trait-for-tuples", - "k256", - "log", - "macro_magic", - "parity-scale-codec", - "paste", - "scale-info", - "serde", - "serde_json", - "smallvec", - "sp-api 34.0.0", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-crypto-hashing-proc-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"sp-genesis-builder 0.15.1", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-metadata-ir 0.7.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", - "sp-state-machine 0.43.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-tracing 17.0.1", - "sp-weights 31.0.0", - "static_assertions", - "tt-call", -] - [[package]] name = "frame-support-procedural" version = "23.0.0" @@ -7535,14 +6637,14 @@ dependencies = [ "derive-syn-parse", "docify", "expander", - "frame-support 28.0.0", - "frame-support-procedural-tools 10.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-support-procedural-tools", + "frame-system", "itertools 0.11.0", "macro_magic", "parity-scale-codec", "pretty_assertions", - "proc-macro-warning", + "proc-macro-warning 1.0.0", "proc-macro2 1.0.86", "quote 1.0.37", "regex", @@ -7556,44 +6658,11 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "frame-support-procedural" -version = "30.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e8f9b6bc1517a6fcbf0b2377e5c8c6d39f5bb7862b191a59a9992081d63972d" -dependencies = [ - "Inflector", - "cfg-expr", - "derive-syn-parse", - "expander", - "frame-support-procedural-tools 13.0.0", - "itertools 0.11.0", - "macro_magic", - "proc-macro-warning", - "proc-macro2 1.0.86", - "quote 1.0.37", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.87", -] - [[package]] name = "frame-support-procedural-tools" version = "10.0.0" dependencies = [ - "frame-support-procedural-tools-derive 11.0.0", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "13.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bead15a320be1764cdd50458c4cfacb23e0cee65f64f500f8e34136a94c7eeca" -dependencies = [ - "frame-support-procedural-tools-derive 12.0.0", + "frame-support-procedural-tools-derive", 
"proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", @@ -7609,27 +6678,16 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "frame-support-procedural-tools-derive" -version = "12.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed971c6435503a099bdac99fe4c5bea08981709e5b5a0a8535a1856f48561191" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "frame-support-test" version = "3.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata 18.0.0", - "frame-support 28.0.0", + "frame-benchmarking", + "frame-executive", + "frame-metadata 16.0.0", + "frame-support", "frame-support-test-pallet", - "frame-system 28.0.0", + "frame-system", "parity-scale-codec", "pretty_assertions", "rustversion", @@ -7651,8 +6709,8 @@ dependencies = [ name = "frame-support-test-compile-pass" version = "4.0.0-dev" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -7664,8 +6722,8 @@ dependencies = [ name = "frame-support-test-pallet" version = "4.0.0-dev" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "serde", @@ -7677,7 +6735,7 @@ name = "frame-support-test-stg-frame-crate" version = "0.1.0" dependencies = [ "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", + "polkadot-sdk-frame", "scale-info", ] @@ -7688,7 +6746,7 @@ dependencies = [ "cfg-if", "criterion", "docify", - "frame-support 28.0.0", + "frame-support", "log", "parity-scale-codec", "scale-info", @@ -7703,34 +6761,13 @@ dependencies = [ "substrate-test-runtime-client", ] -[[package]] -name = "frame-system" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c7fa02f8c305496d2ae52edaecdb9d165f11afa965e05686d7d7dd1ce93611" -dependencies = [ - 
"cfg-if", - "docify", - "frame-support 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-version 37.0.0", - "sp-weights 31.0.0", -] - [[package]] name = "frame-system-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -7740,21 +6777,6 @@ dependencies = [ "sp-version 29.0.0", ] -[[package]] -name = "frame-system-benchmarking" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9693b2a736beb076e673520e1e8dee4fc128b8d35b020ef3e8a4b1b5ad63d9f2" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "frame-system-rpc-runtime-api" version = "26.0.0" @@ -7764,39 +6786,16 @@ dependencies = [ "sp-api 26.0.0", ] -[[package]] -name = "frame-system-rpc-runtime-api" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "475c4f8604ba7e4f05cd2c881ba71105093e638b9591ec71a8db14a64b3b4ec3" -dependencies = [ - "docify", - "parity-scale-codec", - "sp-api 34.0.0", -] - [[package]] name = "frame-try-runtime" version = "0.34.0" dependencies = [ - "frame-support 28.0.0", + "frame-support", "parity-scale-codec", "sp-api 26.0.0", "sp-runtime 31.0.1", ] -[[package]] -name = "frame-try-runtime" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c811a5a1f5429c7fb5ebbf6cf9502d8f9b673fd395c12cf46c44a30a7daf0e" -dependencies = [ - "frame-support 38.0.0", - "parity-scale-codec", - "sp-api 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "fs-err" 
version = "2.9.0" @@ -7819,7 +6818,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -7852,9 +6851,9 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.2.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" dependencies = [ "futures-timer", "futures-util", @@ -7935,13 +6934,12 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.26.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.23.18", - "rustls-pki-types", + "rustls 0.21.7", ] [[package]] @@ -7963,7 +6961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ "gloo-timers", - "send_wrapper", + "send_wrapper 0.4.0", ] [[package]] @@ -7998,12 +6996,12 @@ name = "generate-bags" version = "28.0.0" dependencies = [ "chrono", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "num-format", - "pallet-staking 28.0.0", - "sp-staking 26.0.0", + "pallet-staking", + "sp-staking", ] [[package]] @@ -8160,45 +7158,45 @@ dependencies = [ name = "glutton-westend-runtime" version = "3.0.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-primitives-aura 0.7.0", - 
"cumulus-primitives-core 0.7.0", - "cumulus-primitives-timestamp 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", - "pallet-aura 27.0.0", - "pallet-glutton 14.0.0", - "pallet-message-queue 31.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "parachains-common 7.0.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-timestamp", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-glutton", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "parachains-common", "parity-scale-codec", "scale-info", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", ] [[package]] @@ -8242,7 +7240,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.7.0", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -8261,7 +7259,7 @@ dependencies = [ "futures-core", 
"futures-sink", "http 1.1.0", - "indexmap 2.7.0", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -8332,16 +7330,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" -dependencies = [ - "foldhash", - "serde", -] - [[package]] name = "hashlink" version = "0.8.4" @@ -8392,9 +7380,6 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] [[package]] name = "hex-conservative" @@ -8402,15 +7387,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" -[[package]] -name = "hex-conservative" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" -dependencies = [ - "arrayvec 0.7.4", -] - [[package]] name = "hex-literal" version = "0.4.1" @@ -8426,7 +7402,7 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner", + "enum-as-inner 0.6.0", "futures-channel", "futures-io", "futures-util", @@ -8434,7 +7410,6 @@ dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.7", "thiserror", "tinyvec", "tokio", @@ -8444,9 +7419,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" dependencies = [ "cfg-if", "futures-util", @@ -8686,7 +7661,7 @@ dependencies = [ "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.18", + "rustls 0.23.14", 
"rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -8769,6 +7744,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "idna" version = "0.4.0" @@ -8877,15 +7863,6 @@ dependencies = [ "uint 0.10.0", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp 0.5.2", -] - [[package]] name = "impl-rlp" version = "0.4.0" @@ -8962,13 +7939,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", - "hashbrown 0.15.2", - "serde", + "hashbrown 0.14.5", ] [[package]] @@ -9043,7 +8019,7 @@ dependencies = [ "socket2 0.5.7", "widestring", "windows-sys 0.48.0", - "winreg", + "winreg 0.50.0", ] [[package]] @@ -9082,7 +8058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.9", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -9149,15 +8125,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = 
[ - "either", -] - [[package]] name = "itoa" version = "1.0.9" @@ -9212,9 +8179,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -9262,36 +8229,103 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.7" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" +dependencies = [ + "jsonrpsee-client-transport 0.22.5", + "jsonrpsee-core 0.22.5", + "jsonrpsee-http-client 0.22.5", + "jsonrpsee-types 0.22.5", +] + +[[package]] +name = "jsonrpsee" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" +dependencies = [ + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", + "jsonrpsee-ws-client 0.23.2", +] + +[[package]] +name = "jsonrpsee" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" +checksum = "5ec465b607a36dc5dd45d48b7689bc83f679f66a3ac6b6b21cc787a11e0f8685" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-http-client", + "jsonrpsee-client-transport 0.24.3", + "jsonrpsee-core 0.24.3", + "jsonrpsee-http-client 0.24.3", "jsonrpsee-proc-macros", "jsonrpsee-server", - "jsonrpsee-types", + "jsonrpsee-types 0.24.3", "jsonrpsee-wasm-client", - "jsonrpsee-ws-client", + "jsonrpsee-ws-client 0.24.3", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" +dependencies = [ + "futures-util", + "http 0.2.9", + "jsonrpsee-core 0.22.5", + "pin-project", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "soketto 0.7.1", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" +dependencies = [ + "base64 0.22.1", + "futures-util", + "http 1.1.0", + "jsonrpsee-core 0.23.2", + "pin-project", + "rustls 0.23.14", + "rustls-pki-types", + "rustls-platform-verifier", + "soketto 0.8.0", + "thiserror", "tokio", + "tokio-rustls 0.26.0", + "tokio-util", "tracing", + "url", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.24.7" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548125b159ba1314104f5bb5f38519e03a41862786aa3925cf349aae9cdd546e" +checksum = "90f0977f9c15694371b8024c35ab58ca043dbbf4b51ccb03db8858a021241df1" dependencies = [ "base64 0.22.1", "futures-channel", "futures-util", "gloo-net", "http 1.1.0", - "jsonrpsee-core", + "jsonrpsee-core 0.24.3", "pin-project", - "rustls 0.23.18", + "rustls 0.23.14", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -9305,9 +8339,54 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.7" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +dependencies = [ + "anyhow", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "hyper 0.14.29", + "jsonrpsee-types 0.22.5", + "pin-project", + "rustc-hash 1.1.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.23.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" +dependencies = [ + "anyhow", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "jsonrpsee-types 0.23.2", + "pin-project", + "rustc-hash 1.1.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" +checksum = "e942c55635fbf5dc421938b8558a8141c7e773720640f4f1dbe1f4164ca4e221" dependencies = [ "async-trait", "bytes", @@ -9316,7 +8395,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "jsonrpsee-types", + "jsonrpsee-types 0.24.3", "parking_lot 0.12.3", "pin-project", "rand", @@ -9332,9 +8411,29 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.24.7" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +dependencies = [ + "async-trait", + "hyper 0.14.29", + "hyper-rustls 0.24.2", + "jsonrpsee-core 0.22.5", + "jsonrpsee-types 0.22.5", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3638bc4617f96675973253b3a45006933bde93c2fd8a6170b33c777cc389e5b" +checksum = "e33774602df12b68a2310b38a535733c477ca4a498751739f89fe8dbbb62ec4c" dependencies = [ "async-trait", "base64 0.22.1", @@ -9342,9 +8441,9 @@ dependencies = [ "hyper 1.3.1", "hyper-rustls 0.27.3", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", - "rustls 0.23.18", + "jsonrpsee-core 0.24.3", + "jsonrpsee-types 0.24.3", + "rustls 0.23.14", "rustls-platform-verifier", "serde", 
"serde_json", @@ -9357,9 +8456,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.7" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" +checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", @@ -9370,9 +8469,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.24.7" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" +checksum = "038fb697a709bec7134e9ccbdbecfea0e2d15183f7140254afef7c5610a3f488" dependencies = [ "futures-util", "http 1.1.0", @@ -9380,8 +8479,8 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.24.3", + "jsonrpsee-types 0.24.3", "pin-project", "route-recognizer", "serde", @@ -9397,9 +8496,35 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.7" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" +dependencies = [ + "beef", + "http 1.1.0", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" +checksum = "23b67d6e008164f027afbc2e7bb79662650158d26df200040282d2aa1cbb093b" dependencies = [ "http 1.1.0", "serde", @@ 
-9409,25 +8534,38 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.24.7" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01cd500915d24ab28ca17527e23901ef1be6d659a2322451e1045532516c25" +checksum = "0470d0ae043ffcb0cd323797a631e637fb4b55fe3eaa6002934819458bba62a7" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.24.3", + "jsonrpsee-core 0.24.3", + "jsonrpsee-types 0.24.3", ] [[package]] name = "jsonrpsee-ws-client" -version = "0.24.7" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ "http 1.1.0", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", + "url", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "992bf67d1132f88edf4a4f8cff474cf01abb2be203004a2b8e11c2b20795b99e" +dependencies = [ + "http 1.1.0", + "jsonrpsee-client-transport 0.24.3", + "jsonrpsee-core 0.24.3", + "jsonrpsee-types 0.24.3", "url", ] @@ -9468,23 +8606,13 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "keccak-asm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" -dependencies = [ - "digest 0.10.7", - "sha3-asm", -] - [[package]] name = "keccak-hash" -version = "0.11.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e1b8590eb6148af2ea2d75f38e7d29f5ca970d5a4df456b3ef19b8b415d0264" +checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" 
dependencies = [ - "primitive-types 0.13.1", + "primitive-types 0.12.2", "tiny-keccak", ] @@ -9514,13 +8642,13 @@ dependencies = [ "pallet-example-mbm", "pallet-example-tasks", "parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "primitive-types 0.13.1", "scale-info", "serde_json", "sp-debug-derive 14.0.0", "static_assertions", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -9560,7 +8688,7 @@ dependencies = [ "rand", "rustls 0.21.7", "rustls-pemfile 1.0.3", - "secrecy 0.8.0", + "secrecy", "serde", "serde_json", "serde_yaml", @@ -9771,31 +8899,9 @@ dependencies = [ "futures-timer", "getrandom", "instant", - "libp2p-allow-block-list 0.2.0", - "libp2p-connection-limits 0.2.1", - "libp2p-core 0.40.1", - "libp2p-identity", - "libp2p-swarm 0.43.7", - "multiaddr 0.18.1", - "pin-project", - "rw-stream-sink", - "thiserror", -] - -[[package]] -name = "libp2p" -version = "0.54.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" -dependencies = [ - "bytes", - "either", - "futures", - "futures-timer", - "getrandom", - "libp2p-allow-block-list 0.4.0", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -9806,9 +8912,10 @@ dependencies = [ "libp2p-ping", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", + "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr 0.18.1", @@ -9823,21 +8930,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "libp2p-core 0.40.1", - "libp2p-identity", - "libp2p-swarm 0.43.7", - "void", -] - -[[package]] -name = "libp2p-allow-block-list" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" -dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "void", ] @@ -9847,21 +8942,9 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "libp2p-core 0.40.1", - "libp2p-identity", - "libp2p-swarm 0.43.7", - "void", -] - -[[package]] -name = "libp2p-connection-limits" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" -dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "void", ] @@ -9893,70 +8976,42 @@ dependencies = [ "void", ] -[[package]] -name = "libp2p-core" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-identity", - "multiaddr 0.18.1", - "multihash 0.19.1", - "multistream-select", - "once_cell", - "parking_lot 0.12.3", - "pin-project", - "quick-protobuf 0.8.1", - "rand", - "rw-stream-sink", - "smallvec", - "thiserror", - "tracing", - "unsigned-varint 0.8.0", - "void", - "web-time", -] - [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" +checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" dependencies = [ "async-trait", "futures", - "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", + "log", "parking_lot 0.12.3", "smallvec", - "tracing", + 
"trust-dns-resolver", ] [[package]] name = "libp2p-identify" -version = "0.45.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", + "log", "lru 0.12.3", "quick-protobuf 0.8.1", "quick-protobuf-codec", "smallvec", "thiserror", - "tracing", "void", ] @@ -9980,84 +9035,83 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.46.2" +version = "0.44.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" dependencies = [ "arrayvec 0.7.4", - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "either", "fnv", "futures", - "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "instant", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", + "log", "quick-protobuf 0.8.1", "quick-protobuf-codec", "rand", "sha2 0.10.8", "smallvec", "thiserror", - "tracing", "uint 0.9.5", + "unsigned-varint 0.7.2", "void", - "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" dependencies = [ "data-encoding", "futures", - "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", + "log", "rand", "smallvec", "socket2 0.5.7", 
"tokio", - "tracing", + "trust-dns-proto 0.22.0", "void", ] [[package]] name = "libp2p-metrics" -version = "0.15.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ - "futures", - "libp2p-core 0.42.0", + "instant", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-ping", - "libp2p-swarm 0.45.1", - "pin-project", + "libp2p-swarm", + "once_cell", "prometheus-client", - "web-time", ] [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" dependencies = [ - "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", + "log", "multiaddr 0.18.1", "multihash 0.19.1", "once_cell", @@ -10067,71 +9121,68 @@ dependencies = [ "snow", "static_assertions", "thiserror", - "tracing", "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.45.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" +checksum = "e702d75cd0827dfa15f8fd92d15b9932abe38d10d21f47c50438c71dd1b5dae3" dependencies = [ "either", "futures", "futures-timer", - "libp2p-core 0.42.0", + "instant", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", + "log", "rand", - "tracing", "void", - "web-time", ] [[package]] name = "libp2p-quic" -version = "0.11.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" 
+checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", + "log", "parking_lot 0.12.3", - "quinn", + "quinn 0.10.2", "rand", - "ring 0.17.8", - "rustls 0.23.18", + "ring 0.16.20", + "rustls 0.21.7", "socket2 0.5.7", "thiserror", "tokio", - "tracing", ] [[package]] name = "libp2p-request-response" -version = "0.27.0" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" +checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" dependencies = [ "async-trait", "futures", - "futures-bounded", - "futures-timer", - "libp2p-core 0.42.0", + "instant", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", + "log", "rand", "smallvec", - "tracing", "void", - "web-time", ] [[package]] @@ -10145,47 +9196,26 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core 0.40.1", - "libp2p-identity", - "log", - "multistream-select", - "once_cell", - "rand", - "smallvec", - "void", -] - -[[package]] -name = "libp2p-swarm" -version = "0.45.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru 0.12.3", + "log", "multistream-select", "once_cell", "rand", "smallvec", "tokio", - "tracing", "void", - "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.35.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" 
dependencies = [ - "heck 0.5.0", + "heck 0.4.1", + "proc-macro-warning 0.4.2", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -10193,90 +9223,105 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.42.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", + "log", "socket2 0.5.7", "tokio", - "tracing", ] [[package]] name = "libp2p-tls" -version = "0.5.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "rcgen 0.11.3", - "ring 0.17.8", - "rustls 0.23.18", + "rcgen", + "ring 0.16.20", + "rustls 0.21.7", "rustls-webpki 0.101.4", "thiserror", - "x509-parser", + "x509-parser 0.15.1", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.3.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.1", + "libp2p-core", + "libp2p-swarm", + "log", "tokio", - "tracing", "void", ] +[[package]] +name = "libp2p-wasm-ext" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" +dependencies = [ + "futures", + "js-sys", + "libp2p-core", + "send_wrapper 0.6.0", 
+ "wasm-bindgen", + "wasm-bindgen-futures", +] + [[package]] name = "libp2p-websocket" -version = "0.44.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "888b2ff2e5d8dcef97283daab35ad1043d18952b65e05279eecbe02af4c6e347" +checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", + "log", "parking_lot 0.12.3", "pin-project-lite", "rw-stream-sink", "soketto 0.8.0", "thiserror", - "tracing", "url", "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.46.0" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ - "either", "futures", - "libp2p-core 0.42.0", + "libp2p-core", + "log", "thiserror", "tracing", "yamux 0.12.1", "yamux 0.13.4", + "yamux", ] [[package]] @@ -10401,9 +9446,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lioness" @@ -10437,9 +9482,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.8.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0fef34af8847e816003bf7fdeac5ea50b9a7a88441ac927a6166b5e812ab79" +checksum = "7286b1971f85d1d60be40ef49e81c1f3b5a0d8b83cfa02ab53591cdacae22901" dependencies = [ "async-trait", "bs58", @@ -10450,7 +9495,7 @@ dependencies = [ "futures-timer", "hex-literal", "hickory-resolver", - "indexmap 2.7.0", + "indexmap 
2.2.3", "libc", "mockall 0.13.0", "multiaddr 0.17.1", @@ -10462,7 +9507,7 @@ dependencies = [ "prost 0.12.6", "prost-build", "rand", - "rcgen 0.10.0", + "rcgen", "ring 0.16.20", "rustls 0.20.9", "serde", @@ -10482,7 +9527,7 @@ dependencies = [ "unsigned-varint 0.8.0", "url", "x25519-dalek", - "x509-parser", + "x509-parser 0.16.0", "yasna", "zeroize", ] @@ -10513,15 +9558,6 @@ dependencies = [ "value-bag", ] -[[package]] -name = "lru" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" -dependencies = [ - "hashbrown 0.12.3", -] - [[package]] name = "lru" version = "0.11.0" @@ -10675,6 +9711,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matches" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + [[package]] name = "matrixmultiply" version = "0.3.7" @@ -10747,15 +9789,15 @@ dependencies = [ [[package]] name = "merkleized-metadata" -version = "0.2.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c592efaf1b3250df14c8f3c2d952233f0302bb81d3586db2f303666c1cd607" +checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" dependencies = [ "array-bytes", "blake3", - "frame-metadata 18.0.0", + "frame-metadata 16.0.0", "parity-scale-codec", - "scale-decode 0.13.1", + "scale-decode", "scale-info", ] @@ -10777,7 +9819,7 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-messages 0.7.0", + "bp-messages", "finality-relay", "futures", "hex", @@ -10809,9 +9851,9 @@ dependencies = [ "docify", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.24.3", "minimal-template-runtime", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "serde_json", ] @@ -10821,7 +9863,7 @@ version = "0.0.0" dependencies = [ "pallet-minimal-template", 
"parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "scale-info", "serde_json", ] @@ -10886,9 +9928,9 @@ dependencies = [ "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-core 28.0.0", - "sp-mmr-primitives 26.0.0", + "sp-mmr-primitives", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-test-runtime-client", @@ -10899,14 +9941,14 @@ dependencies = [ name = "mmr-rpc" version = "28.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "parity-scale-codec", "serde", "serde_json", "sp-api 26.0.0", "sp-blockchain", "sp-core 28.0.0", - "sp-mmr-primitives 26.0.0", + "sp-mmr-primitives", "sp-runtime 31.0.1", ] @@ -11273,13 +10315,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.4" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", + "static_assertions", ] [[package]] @@ -11295,13 +10338,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.29.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.6.0", "cfg-if", - "cfg_aliases 0.2.1", + "cfg_aliases 0.1.1", "libc", ] @@ -11322,9 +10365,8 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes", - "async-trait", "clap 4.5.13", - "derive_more 0.99.17", + "derive_more", "fs_extra", "futures", "hash-db", @@ -11344,10 +10386,10 @@ dependencies = [ "serde_json", "sp-consensus", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", 
"sp-trie 29.0.0", "tempfile", @@ -11365,7 +10407,7 @@ dependencies = [ name = "node-rpc" version = "3.0.0-dev" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "mmr-rpc", "node-primitives", "pallet-transaction-payment-rpc", @@ -11383,14 +10425,14 @@ dependencies = [ "sc-transaction-pool-api", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-statement-store 10.0.0", + "sp-statement-store", "substrate-frame-rpc-system", "substrate-state-trie-migration-rpc", ] @@ -11422,19 +10464,19 @@ dependencies = [ name = "node-testing" version = "3.0.0-dev" dependencies = [ - "frame-metadata-hash-extension 0.1.0", - "frame-system 28.0.0", + "frame-metadata-hash-extension", + "frame-system", "fs_extra", "futures", "kitchensink-runtime", "log", "node-primitives", - "pallet-asset-conversion 10.0.0", - "pallet-asset-conversion-tx-payment 10.0.0", - "pallet-asset-tx-payment 28.0.0", - "pallet-assets 29.1.0", - "pallet-revive 0.1.0", - "pallet-skip-feeless-payment 3.0.0", + "pallet-asset-conversion", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-revive", + "pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -11443,16 +10485,16 @@ dependencies = [ "sc-executor 0.32.0", "sc-service", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "staging-node-cli", "substrate-test-client", "tempfile", @@ -11613,9 +10655,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.17" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", @@ -11676,13 +10718,22 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs 0.5.2", +] + [[package]] name = "oid-registry" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" dependencies = [ - "asn1-rs", + "asn1-rs 0.6.1", ] [[package]] @@ -11783,7 +10834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7b1d40dd8f367db3c65bec8d3dd47d4a604ee8874480738f93191bddab4e0e0" dependencies = [ "expander", - "indexmap 2.7.0", + "indexmap 2.2.3", "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", @@ -11834,13 +10885,13 @@ name = "pallet-alliance" version = "27.0.0" dependencies = [ "array-bytes", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-collective 28.0.0", - "pallet-identity 29.0.0", + "pallet-balances", + "pallet-collective", + "pallet-identity", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11849,36 +10900,16 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-alliance" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59378a648a0aa279a4b10650366c3389cd0a1239b1876f74bfecd268eecb086b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-collective 38.0.0", - "pallet-identity 
38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-asset-conversion" version = "10.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", + "pallet-assets", + "pallet-balances", "parity-scale-codec", "primitive-types 0.13.1", "scale-info", @@ -11889,36 +10920,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-asset-conversion" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33f0078659ae95efe6a1bf138ab5250bc41ab98f22ff3651d0208684f08ae797" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-asset-conversion-ops" version = "0.1.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", "parity-scale-codec", "primitive-types 0.13.1", "scale-info", @@ -11928,36 +10940,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-asset-conversion-ops" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3edbeda834bcd6660f311d4eead3dabdf6d385b7308ac75b0fae941a960e6c3a" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-asset-conversion 
20.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-asset-conversion-tx-payment" version = "10.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-transaction-payment 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-transaction-payment", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11966,29 +10959,14 @@ dependencies = [ "sp-storage 19.0.0", ] -[[package]] -name = "pallet-asset-conversion-tx-payment" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab66c4c22ac0f20e620a954ce7ba050118d6d8011e2d02df599309502064e98" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-asset-conversion 20.0.0", - "pallet-transaction-payment 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-asset-rate" version = "7.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11996,32 +10974,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-asset-rate" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2149aa741bc39466bbcc92d9d0ab6e9adcf39d2790443a735ad573b3191e7" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-asset-tx-payment" version = 
"28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-assets 29.1.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-transaction-payment 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-authorship", + "pallet-balances", + "pallet-transaction-payment", "parity-scale-codec", "scale-info", "serde", @@ -12032,34 +10995,16 @@ dependencies = [ "sp-storage 19.0.0", ] -[[package]] -name = "pallet-asset-tx-payment" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406a486466d15acc48c99420191f96f1af018f3381fde829c467aba489030f18" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-transaction-payment 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-assets" version = "29.1.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12068,32 +11013,29 @@ dependencies = [ ] [[package]] -name = "pallet-assets" -version = "40.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f45f4eb6027fc34c4650e0ed6a7e57ed3335cc364be74b4531f714237676bcee" +name = "pallet-assets-freezer" +version = "0.1.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", + "frame-benchmarking", + "frame-support", + "frame-system", "log", + "pallet-assets", + "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] 
-name = "pallet-assets-freezer" -version = "0.1.0" +name = "pallet-atomic-swap" +version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12102,189 +11044,77 @@ dependencies = [ ] [[package]] -name = "pallet-assets-freezer" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "127adc2250b89416b940850ce2175dab10a9297b503b1fcb05dc555bd9bd3207" +name = "pallet-aura" +version = "27.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "frame-support", + "frame-system", "log", - "pallet-assets 40.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-runtime 39.0.2", + "sp-application-crypto 30.0.0", + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] -name = "pallet-atomic-swap" +name = "pallet-authority-discovery" version = "28.0.0" dependencies = [ - "pallet-balances 28.0.0", + "frame-support", + "frame-system", + "pallet-session", "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", "scale-info", + "sp-application-crypto 30.0.0", + "sp-authority-discovery", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] -name = "pallet-atomic-swap" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15906a685adeabe6027e49c814a34066222dd6136187a8a79c213d0d739b6634" +name = "pallet-authorship" +version = "28.0.0" dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", + "frame-support", + "frame-system", + "impl-trait-for-tuples", "parity-scale-codec", "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - -[[package]] -name = "pallet-aura" -version = "27.0.0" -dependencies = [ - 
"frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-timestamp 27.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 30.0.0", - "sp-consensus-aura 0.32.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", -] - -[[package]] -name = "pallet-aura" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b31da6e794d655d1f9c4da6557a57399538d75905a7862a2ed3f7e5fb711d7e4" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-timestamp 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-consensus-aura 0.40.0", - "sp-runtime 39.0.2", -] - -[[package]] -name = "pallet-authority-discovery" -version = "28.0.0" -dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-session 28.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 30.0.0", - "sp-authority-discovery 26.0.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", -] - -[[package]] -name = "pallet-authority-discovery" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb0208f0538d58dcb78ce1ff5e6e8641c5f37b23b20b05587e51da30ab13541" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-authority-discovery 34.0.0", - "sp-runtime 39.0.2", -] - -[[package]] -name = "pallet-authorship" -version = "28.0.0" -dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", -] - -[[package]] -name = "pallet-authorship" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625d47577cabbe1318ccec5d612e2379002d1b6af1ab6edcef3243c66ec246df" -dependencies = [ - "frame-support 38.0.0", - 
"frame-system 38.0.0", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] name = "pallet-babe" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-offences 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "pallet-authorship", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-babe" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee096c0def13832475b340d00121025e0225de29604d44bc6dfcaa294c995b4" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "pallet-session 38.0.0", - "pallet-timestamp 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-consensus-babe 0.40.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-staking 36.0.0", + "sp-session", + "sp-staking", ] [[package]] @@ -12293,12 +11123,12 @@ version = "27.0.0" dependencies = [ "aquamarine", "docify", - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", 
- "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12307,35 +11137,13 @@ dependencies = [ "sp-tracing 16.0.0", ] -[[package]] -name = "pallet-bags-list" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fd23a6f94ba9c1e57c8a7f8a41327d132903a79c55c0c83f36cbae19946cf10" -dependencies = [ - "aquamarine", - "docify", - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", -] - [[package]] name = "pallet-bags-list-fuzzer" version = "4.0.0-dev" dependencies = [ - "frame-election-provider-support 28.0.0", + "frame-election-provider-support", "honggfuzz", - "pallet-bags-list 27.0.0", + "pallet-bags-list", "rand", ] @@ -12343,13 +11151,13 @@ dependencies = [ name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" dependencies = [ - "frame-election-provider-support 28.0.0", + "frame-election-provider-support", "frame-remote-externalities", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-bags-list 27.0.0", - "pallet-staking 28.0.0", + "pallet-bags-list", + "pallet-staking", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", @@ -12362,11 +11170,11 @@ name = "pallet-balances" version = "28.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment", "parity-scale-codec", "paste", "scale-info", @@ -12375,130 +11183,68 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-balances" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c6945b078919acb14d126490e4b0973a688568b30142476ca69c6df2bed27ad" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-beefy" version = "28.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-offences 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "pallet-authorship", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-state-machine 0.35.0", ] -[[package]] -name = "pallet-beefy" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "014d177a3aba19ac144fc6b2b5eb94930b9874734b91fd014902b6706288bb5f" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-consensus-beefy 22.1.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-staking 36.0.0", -] - [[package]] name = "pallet-beefy-mmr" version = "28.0.0" dependencies = [ "array-bytes", - "binary-merkle-tree 13.0.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "binary-merkle-tree", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-beefy 28.0.0", - "pallet-mmr 27.0.0", - "pallet-session 28.0.0", + "pallet-beefy", + "pallet-mmr", + 
"pallet-session", "parity-scale-codec", "scale-info", "serde", "sp-api 26.0.0", - "sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-state-machine 0.35.0", ] -[[package]] -name = "pallet-beefy-mmr" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c64f536e7f04cf3a0a17fdf20870ddb3d63a7690419c40f75cfd2f72b6e6d22" -dependencies = [ - "array-bytes", - "binary-merkle-tree 15.0.1", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-beefy 39.0.0", - "pallet-mmr 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-consensus-beefy 22.1.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-state-machine 0.43.0", -] - [[package]] name = "pallet-bounties" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-treasury 27.0.0", + "pallet-balances", + "pallet-treasury", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12506,42 +11252,24 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-bounties" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1163f9cd8bbc47ec0c6900a3ca67689d8d7b40bedfa6aa22b1b3c6027b1090e" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-treasury 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-bridge-beefy" version = "0.1.0" dependencies = [ "bp-beefy", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", + "bp-runtime", + "bp-test-utils", "ckb-merkle-mountain-range", - "frame-support 
28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-beefy-mmr 28.0.0", - "pallet-mmr 27.0.0", + "pallet-beefy-mmr", + "pallet-mmr", "parity-scale-codec", "rand", "scale-info", "serde", - "sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -12552,56 +11280,36 @@ dependencies = [ name = "pallet-bridge-grandpa" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] -[[package]] -name = "pallet-bridge-grandpa" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d825fbed9fb68bc5d344311653dc0f69caeabe647365abf79a539310b2245f6" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-runtime 0.18.0", - "bp-test-utils 0.18.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-consensus-grandpa 21.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "pallet-bridge-messages" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-runtime", + "bp-test-utils", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", + "pallet-balances", + "pallet-bridge-grandpa", "parity-scale-codec", 
"scale-info", "sp-core 28.0.0", @@ -12611,40 +11319,20 @@ dependencies = [ "sp-trie 29.0.0", ] -[[package]] -name = "pallet-bridge-messages" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1decdc9fb885e46eb17f850aa14f8cf39e17f31574aa6a5fa1a9e603cc526a2" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-runtime 0.18.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 37.0.0", -] - [[package]] name = "pallet-bridge-parachains" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-parachains", + "bp-polkadot-core", + "bp-runtime", + "bp-test-utils", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-grandpa", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12653,48 +11341,27 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "pallet-bridge-parachains" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41450a8d214f20eaff57aeca8e647b20c0df7d66871ee2262609b90824bd4cca" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-parachains 0.18.0", - "bp-polkadot-core 0.18.0", - "bp-runtime 0.18.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-bridge-grandpa 0.18.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "pallet-bridge-relayers" version = "0.7.0" dependencies = [ - "bp-header-chain 0.7.0", - 
"bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-transaction-payment 28.0.0", - "pallet-utility 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", + "bp-test-utils", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -12704,39 +11371,14 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "pallet-bridge-relayers" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2faead05455a965a0a0ec69ffa779933479b599e40bda809c0aa1efa72a39281" -dependencies = [ - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-relayers 0.18.0", - "bp-runtime 0.18.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-bridge-grandpa 0.18.0", - "pallet-bridge-messages 0.18.0", - "pallet-bridge-parachains 0.18.0", - "pallet-transaction-payment 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "pallet-broker" version = "0.6.0" dependencies = [ "bitvec", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "pretty_assertions", @@ -12749,36 +11391,17 @@ dependencies = [ "sp-tracing 16.0.0", ] -[[package]] 
-name = "pallet-broker" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3043c90106d88cb93fcf0d9b6d19418f11f44cc2b11873414aec3b46044a24ea" -dependencies = [ - "bitvec", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-child-bounties" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-bounties 27.0.0", - "pallet-treasury 27.0.0", + "pallet-balances", + "pallet-bounties", + "pallet-treasury", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12786,110 +11409,54 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-child-bounties" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f3bc38ae6584b5f57e4de3e49e5184bfc0f20692829530ae1465ffe04e09e7" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-bounties 37.0.0", - "pallet-treasury 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-collator-selection" version = "9.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "parity-scale-codec", - "rand", - "scale-info", - "sp-consensus-aura 0.32.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", - "sp-staking 26.0.0", - "sp-tracing 16.0.0", -] - -[[package]] 
-name = "pallet-collator-selection" -version = "19.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658798d70c9054165169f6a6a96cfa9d6a5e7d24a524bc19825bf17fcbc5cc5a" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "pallet-balances 39.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "rand", - "scale-info", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - -[[package]] -name = "pallet-collective" -version = "28.0.0" -dependencies = [ - "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-balances 28.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-session", + "pallet-timestamp", "parity-scale-codec", + "rand", "scale-info", + "sp-consensus-aura", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", + "sp-staking", + "sp-tracing 16.0.0", ] [[package]] name = "pallet-collective" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e149f1aefd444c9a1da6ec5a94bc8a7671d7a33078f85dd19ae5b06e3438e60" +version = "28.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "docify", + "frame-benchmarking", + "frame-support", + "frame-system", "log", + "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] name = "pallet-collective-content" version = "0.6.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12897,21 +11464,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-collective-content" -version = "0.16.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38a6a5cbe781d9c711be74855ba32ef138f3779d6c54240c08e6d1b4bbba4d1d" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-contracts" version = "27.0.0" @@ -12920,21 +11472,21 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "environmental", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", + "pallet-assets", + "pallet-balances", "pallet-contracts-fixtures", - "pallet-contracts-proc-macro 18.0.0", - "pallet-contracts-uapi 5.0.0", - "pallet-insecure-randomness-collective-flip 16.0.0", - "pallet-message-queue 31.0.0", - "pallet-proxy 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-utility 28.0.0", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", "parity-scale-codec", "paste", "pretty_assertions", @@ -12950,52 +11502,19 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", + "staging-xcm", + "staging-xcm-builder", "wasm-instrument", "wasmi 0.32.3", "wat", ] -[[package]] -name = "pallet-contracts" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5df77077745d891c822b4275f273f336077a97e69e62a30134776aa721c96fee" -dependencies = [ - "bitflags 1.3.2", - "environmental", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-balances 39.0.0", - "pallet-contracts-proc-macro 23.0.1", - "pallet-contracts-uapi 12.0.0", - 
"parity-scale-codec", - "paste", - "rand", - "scale-info", - "serde", - "smallvec", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "wasm-instrument", - "wasmi 0.32.3", -] - [[package]] name = "pallet-contracts-fixtures" version = "1.0.0" dependencies = [ "anyhow", - "frame-system 28.0.0", + "frame-system", "parity-wasm", "sp-runtime 31.0.1", "tempfile", @@ -13008,24 +11527,24 @@ name = "pallet-contracts-mock-network" version = "3.0.0" dependencies = [ "assert_matches", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-contracts 27.0.0", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "pallet-contracts", "pallet-contracts-fixtures", - "pallet-contracts-proc-macro 18.0.0", - "pallet-contracts-uapi 5.0.0", - "pallet-insecure-randomness-collective-flip 16.0.0", - "pallet-message-queue 31.0.0", - "pallet-proxy 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", "pretty_assertions", "scale-info", "sp-api 26.0.0", @@ -13034,46 +11553,10 @@ dependencies = [ "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "xcm-simulator 7.0.0", -] - -[[package]] -name = "pallet-contracts-mock-network" -version = "14.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "309666537ed001c61a99f59fa7b98680f4a6e4e361ed3bc64f7b0237da3e3e06" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-assets 40.0.0", - "pallet-balances 39.0.0", - "pallet-contracts 38.0.0", - "pallet-contracts-proc-macro 23.0.1", - "pallet-contracts-uapi 12.0.0", - "pallet-insecure-randomness-collective-flip 26.0.0", - "pallet-message-queue 41.0.1", - "pallet-proxy 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-utility 38.0.0", - "pallet-xcm 17.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-parachains 17.0.1", - "scale-info", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "xcm-simulator 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] @@ -13085,17 +11568,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "pallet-contracts-proc-macro" -version = "23.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94226cbd48516b7c310eb5dae8d50798c1ce73a7421dc0977c55b7fc2237a283" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "pallet-contracts-uapi" version = "5.0.0" @@ -13106,29 +11578,16 @@ dependencies = [ "scale-info", ] -[[package]] -name = "pallet-contracts-uapi" -version = "12.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f74b000590c33fadea48585d3ae3f4b7867e99f0a524c444d5779f36b9a1b6" -dependencies = [ - "bitflags 1.3.2", - "parity-scale-codec", - "paste", - "polkavm-derive 0.9.1", - "scale-info", -] - [[package]] name = "pallet-conviction-voting" version = "28.0.0" dependencies = [ "assert_matches", - "frame-benchmarking 28.0.0", - 
"frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-scheduler 29.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-scheduler", "parity-scale-codec", "scale-info", "serde", @@ -13137,32 +11596,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-conviction-voting" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999c242491b74395b8c5409ef644e782fe426d87ae36ad92240ffbf21ff0a76e" -dependencies = [ - "assert_matches", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-core-fellowship" version = "12.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-ranked-collective 28.0.0", + "pallet-ranked-collective", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -13171,31 +11613,12 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-core-fellowship" -version = "22.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d063b41df454bd128d6fefd5800af8a71ac383c9dd6f20096832537efc110a8a" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-ranked-collective 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-default-config-example" version = "10.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -13207,52 +11630,36 @@ dependencies = [ name = "pallet-delegated-staking" version = "1.0.0" dependencies = [ - 
"frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-staking 28.0.0", + "pallet-balances", + "pallet-nomination-pools", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", "substrate-test-utils", ] -[[package]] -name = "pallet-delegated-staking" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "117f003a97f980514c6db25a50c22aaec2a9ccb5664b3cb32f52fb990e0b0c12" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - [[package]] name = "pallet-democracy" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-preimage 28.0.0", - "pallet-scheduler 29.0.0", + "pallet-balances", + "pallet-preimage", + "pallet-scheduler", "parity-scale-codec", "scale-info", "serde", @@ -13261,32 +11668,14 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-democracy" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d1dc655f50b7c65bb2fb14086608ba11af02ef2936546f7a67db980ec1f133" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-dev-mode" version = "10.0.0" dependencies = [ - "frame-support 
28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13294,45 +11683,29 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-dev-mode" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1d8050c09c5e003d502c1addc7fdfbde21a854bd57787e94447078032710c8" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-election-provider-e2e-test" version = "1.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-election-provider-multi-phase 27.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", - "pallet-timestamp 27.0.0", + "pallet-bags-list", + "pallet-balances", + "pallet-election-provider-multi-phase", + "pallet-nomination-pools", + "pallet-session", + "pallet-staking", + "pallet-timestamp", "parity-scale-codec", "parking_lot 0.12.3", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -13341,13 +11714,13 @@ dependencies = [ name = "pallet-election-provider-multi-phase" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-election-provider-support-benchmarking 27.0.0", + 
"pallet-balances", + "pallet-election-provider-support-benchmarking", "parity-scale-codec", "parking_lot 0.12.3", "rand", @@ -13355,115 +11728,59 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "strum 0.26.3", ] -[[package]] -name = "pallet-election-provider-multi-phase" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f9ad5ae0c13ba3727183dadf1825b6b7b0b0598ed5c366f8697e13fd540f7d" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-election-provider-support-benchmarking 37.0.0", - "parity-scale-codec", - "rand", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-npos-elections 34.0.0", - "sp-runtime 39.0.2", - "strum 0.26.3", -] - [[package]] name = "pallet-election-provider-support-benchmarking" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-system", "parity-scale-codec", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-election-provider-support-benchmarking" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4111d0d27545c260c9dd0d6fc504961db59c1ec4b42e1bcdc28ebd478895c22" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "sp-npos-elections 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-elections-phragmen" version = "29.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + 
"frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", "substrate-test-utils", ] -[[package]] -name = "pallet-elections-phragmen" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705c66d6c231340c6d085a0df0319a6ce42a150f248171e88e389ab1e3ce20f5" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-npos-elections 34.0.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - [[package]] name = "pallet-example-authorization-tx-extension" version = "1.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "pallet-verify-signature", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", ] @@ -13471,11 +11788,11 @@ dependencies = [ name = "pallet-example-basic" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13488,7 +11805,7 @@ name = "pallet-example-frame-crate" version = "0.0.1" dependencies = [ "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", + "polkadot-sdk-frame", "scale-info", ] @@ -13496,11 +11813,11 @@ dependencies = [ name = "pallet-example-kitchensink" version = "4.0.0-dev" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + 
"frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13512,11 +11829,11 @@ dependencies = [ name = "pallet-example-mbm" version = "0.1.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-migrations 1.0.0", + "pallet-migrations", "parity-scale-codec", "scale-info", "sp-io 30.0.0", @@ -13526,8 +11843,8 @@ dependencies = [ name = "pallet-example-offchain-worker" version = "28.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "lite-json", "log", "parity-scale-codec", @@ -13543,12 +11860,12 @@ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ "docify", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-try-runtime 0.34.0", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13561,9 +11878,9 @@ dependencies = [ name = "pallet-example-split" version = "10.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -13575,9 +11892,9 @@ dependencies = [ name = "pallet-example-tasks" version = "1.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -13591,7 +11908,7 @@ name = "pallet-examples" version = "4.0.0-dev" dependencies = [ "pallet-default-config-example", - "pallet-dev-mode 10.0.0", + "pallet-dev-mode", "pallet-example-authorization-tx-extension", "pallet-example-basic", 
"pallet-example-frame-crate", @@ -13607,79 +11924,41 @@ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-staking 28.0.0", + "pallet-balances", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", "substrate-test-utils", ] -[[package]] -name = "pallet-fast-unstake" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ee60e8ef10b3936f2700bd61fa45dcc190c61124becc63bed787addcfa0d20" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - -[[package]] -name = "pallet-glutton" -version = "14.0.0" -dependencies = [ - "blake2 0.10.6", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-balances 28.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-inherents 26.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", -] - [[package]] name = "pallet-glutton" -version = "24.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1c79ab340890f6ab088a638c350ac1173a1b2a79c18004787523032025582b4" +version = "14.0.0" dependencies = [ "blake2 0.10.6", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", + "pallet-balances", "parity-scale-codec", 
"scale-info", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-inherents", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] @@ -13687,51 +11966,28 @@ name = "pallet-grandpa" version = "28.0.0" dependencies = [ "finality-grandpa", - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-offences 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-authorship", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-grandpa" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3a570a4aac3173ea46b600408183ca2bcfdaadc077f802f11e6055963e2449" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-consensus-grandpa 21.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-staking 36.0.0", + "sp-session", + "sp-staking", ] [[package]] @@ -13739,11 +11995,11 @@ name = "pallet-identity" version = "29.0.0" dependencies = [ "enumflags2", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + 
"frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13752,101 +12008,47 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-identity" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a4288548de9a755e39fcb82ffb9024b6bb1ba0f582464a44423038dd7a892e" -dependencies = [ - "enumflags2", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-im-online" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-authorship 28.0.0", - "pallet-session 28.0.0", + "pallet-authorship", + "pallet-session", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-im-online" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fd95270cf029d16cb40fe6bd9f8ab9c78cd966666dccbca4d8bfec35c5bba5" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-application-crypto 38.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", + "sp-staking", ] [[package]] name = "pallet-indices" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", ] 
-[[package]] -name = "pallet-indices" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e4b97de630427a39d50c01c9e81ab8f029a00e56321823958b39b438f7b940" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-keyring 39.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-insecure-randomness-collective-flip" version = "16.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "safe-mix", "scale-info", @@ -13855,29 +12057,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-insecure-randomness-collective-flip" -version = "26.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce7ad80675d78bd38a7a66ecbbf2d218dd32955e97f8e301d0afe6c87b0f251" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "safe-mix", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-lottery" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", + "frame-benchmarking", + "frame-support", "frame-support-test", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -13885,27 +12073,13 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-lottery" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae0920ee53cf7b0665cfb6d275759ae0537dc3850ec78da5f118d814c99d3562" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-membership" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", 
- "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -13914,31 +12088,14 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-membership" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868b5dca4bbfd1f4a222cbb80735a5197020712a71577b496bbb7e19aaa5394" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-message-queue" version = "31.0.0" dependencies = [ "environmental", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "rand", @@ -13954,43 +12111,23 @@ dependencies = [ "sp-weights 27.0.0", ] -[[package]] -name = "pallet-message-queue" -version = "41.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0faa48b29bf5a178580c164ef00de87319a37da7547a9cd6472dfd160092811a" -dependencies = [ - "environmental", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", -] - [[package]] name = "pallet-migrations" version = "1.0.0" dependencies = [ "cfg-if", "docify", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", "parity-scale-codec", "pretty_assertions", "scale-info", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -13998,30 +12135,12 @@ 
dependencies = [ "sp-version 29.0.0", ] -[[package]] -name = "pallet-migrations" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b417fc975636bce94e7c6d707e42d0706d67dfa513e72f5946918e1044beef1" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-minimal-template" version = "0.0.0" dependencies = [ "parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "scale-info", ] @@ -14029,33 +12148,18 @@ dependencies = [ name = "pallet-mixnet" version = "0.4.0" dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", "scale-info", "serde", "sp-application-crypto 30.0.0", - "sp-mixnet 0.4.0", -] - -[[package]] -name = "pallet-mixnet" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3fa2b7f759a47f698a403ab40c54bc8935e2969387947224cbdb4e2bc8a28a" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-application-crypto 38.0.0", - "sp-arithmetic 26.0.0", - "sp-io 38.0.0", - "sp-mixnet 0.12.0", - "sp-runtime 39.0.2", + "sp-arithmetic 23.0.0", + "sp-io 30.0.0", + "sp-mixnet", + "sp-runtime 31.0.1", ] [[package]] @@ -14063,76 +12167,42 @@ name = "pallet-mmr" version = "27.0.0" dependencies = [ "array-bytes", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "itertools 0.11.0", "log", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-mmr-primitives 26.0.0", + "sp-mmr-primitives", "sp-runtime 31.0.1", "sp-tracing 16.0.0", ] -[[package]] -name = "pallet-mmr" 
-version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6932dfb85f77a57c2d1fdc28a7b3a59ffe23efd8d5bb02dc3039d91347e4a3b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-mmr-primitives 34.1.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-multisig" version = "28.0.0" dependencies = [ "log", - "pallet-balances 28.0.0", - "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", - "scale-info", -] - -[[package]] -name = "pallet-multisig" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e5099c9a4442efcc1568d88ca1d22d624e81ab96358f99f616c67fbd82532d2" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", + "pallet-balances", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", ] [[package]] name = "pallet-nft-fractionalization" version = "10.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-nfts 22.0.0", + "pallet-assets", + "pallet-balances", + "pallet-nfts", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -14141,33 +12211,16 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "pallet-nft-fractionalization" -version = "21.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168792cf95a32fa3baf9b874efec82a45124da0a79cee1ae3c98a823e6841959" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-assets 40.0.0", - "pallet-nfts 32.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-nfts" 
version = "22.0.0" dependencies = [ "enumflags2", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -14176,52 +12229,23 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-nfts" -version = "32.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e2aad461a0849d7f0471576eeb1fe3151795bcf2ec9e15eca5cca5b9d743b2" -dependencies = [ - "enumflags2", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-nfts-runtime-api" version = "14.0.0" dependencies = [ - "pallet-nfts 22.0.0", + "pallet-nfts", "parity-scale-codec", "sp-api 26.0.0", ] -[[package]] -name = "pallet-nfts-runtime-api" -version = "24.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a1f50c217e19dc50ff586a71eb5915df6a05bc0b25564ea20674c8cd182c1f" -dependencies = [ - "pallet-nfts 32.0.0", - "parity-scale-codec", - "sp-api 34.0.0", -] - [[package]] name = "pallet-nis" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -14230,28 +12254,12 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-nis" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ac349e119880b7df1a7c4c36d919b33a498d0e9548af3c237365c654ae0c73d" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - 
"sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-node-authorization" version = "28.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -14260,112 +12268,56 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-node-authorization" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec3133be9e767b8feafbb26edd805824faa59956da008d2dc7fcf4b4720e56" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-nomination-pools" version = "25.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", ] -[[package]] -name = "pallet-nomination-pools" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42906923f9f2b65b22f1211136b57c6878296ba6f6228a075c4442cc1fc1659" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", - "sp-tracing 17.0.1", -] - -[[package]] -name = "pallet-nomination-pools-benchmarking" -version = "26.0.0" -dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-delegated-staking 1.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-staking 28.0.0", - 
"pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", - "sp-runtime-interface 24.0.0", - "sp-staking 26.0.0", -] - [[package]] name = "pallet-nomination-pools-benchmarking" -version = "36.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d2eaca0349bcda923343226b8b64d25a80b67e0a1ebaaa5b0ab1e1b3b225bc" +version = "26.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-bags-list 37.0.0", - "pallet-delegated-staking 5.0.0", - "pallet-nomination-pools 35.0.0", - "pallet-staking 38.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-bags-list", + "pallet-balances", + "pallet-delegated-staking", + "pallet-nomination-pools", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-runtime 39.0.2", - "sp-runtime-interface 28.0.0", - "sp-staking 36.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-runtime-interface 24.0.0", + "sp-staking", ] [[package]] name = "pallet-nomination-pools-fuzzer" version = "2.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "honggfuzz", "log", - "pallet-nomination-pools 25.0.0", + "pallet-nomination-pools", "rand", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -14376,43 +12328,32 @@ dependencies = [ name = "pallet-nomination-pools-runtime-api" version = "23.0.0" dependencies = [ - "pallet-nomination-pools 25.0.0", + "pallet-nomination-pools", "parity-scale-codec", "sp-api 26.0.0", ] -[[package]] -name = "pallet-nomination-pools-runtime-api" -version = "33.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9e1cb89cc2e6df06ce274a7fc814e5e688aad04c43902a10191fa3d2a56a96" 
-dependencies = [ - "pallet-nomination-pools 35.0.0", - "parity-scale-codec", - "sp-api 34.0.0", -] - [[package]] name = "pallet-nomination-pools-test-delegate-stake" version = "1.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-delegated-staking 1.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-staking 28.0.0", + "pallet-bags-list", + "pallet-balances", + "pallet-delegated-staking", + "pallet-nomination-pools", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -14421,22 +12362,22 @@ dependencies = [ name = "pallet-nomination-pools-test-transfer-stake" version = "1.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-staking 28.0.0", + "pallet-bags-list", + "pallet-balances", + "pallet-nomination-pools", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -14445,84 +12386,43 @@ dependencies = [ name = "pallet-offences" version = "27.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", "sp-io 
30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-offences" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4379cf853465696c1c5c03e7e8ce80aeaca0a6139d698abe9ecb3223fd732a" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", + "sp-staking", ] [[package]] name = "pallet-offences-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-grandpa 28.0.0", - "pallet-im-online 27.0.0", - "pallet-offences 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-babe", + "pallet-balances", + "pallet-grandpa", + "pallet-im-online", + "pallet-offences", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-offences-benchmarking" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69aa1b24cdffc3fa8c89cdea32c83f1bf9c1c82a87fa00e57ae4be8e85f5e24f" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-babe 38.0.0", - "pallet-balances 39.0.0", - "pallet-grandpa 38.0.0", - "pallet-im-online 37.0.0", - "pallet-offences 37.0.0", - "pallet-session 38.0.0", - "pallet-staking 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", + 
"sp-staking", ] [[package]] @@ -14530,9 +12430,9 @@ name = "pallet-paged-list" version = "0.6.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -14541,32 +12441,14 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-paged-list" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e099fb116068836b17ca4232dc52f762b69dc8cd4e33f509372d958de278b0" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-metadata-ir 0.7.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-paged-list-fuzzer" version = "0.1.0" dependencies = [ "arbitrary", - "frame-support 28.0.0", + "frame-support", "honggfuzz", - "pallet-paged-list 0.6.0", + "pallet-paged-list", "sp-io 30.0.0", ] @@ -14577,6 +12459,9 @@ dependencies = [ "frame-benchmarking 28.0.0", "frame-support 28.0.0", "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -14661,111 +12546,143 @@ dependencies = [ "staging-xcm-builder 7.0.0", "staging-xcm-executor 7.0.0", "xcm-simulator 7.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] -name = "pallet-parameters" -version = "0.1.0" +name = "pallet-parachain-template-two" +version = "0.0.0" dependencies = [ - "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-example-basic", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", - "paste", "scale-info", - "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", ] [[package]] -name = "pallet-parameters" -version = "0.9.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9aba424d55e17b2a2bec766a41586eab878137704d4803c04bebd6a4743db7b" +name = "pallet-parachain-xcnft" +version = "0.1.0" dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "enumflags2", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "pallet-message-queue", + "pallet-nfts", + "pallet-xcm", "parity-scale-codec", - "paste", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", "scale-info", "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] -name = "pallet-preimage" -version = "28.0.0" +name = "pallet-parachain-xcnft-two" +version = "0.1.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", - "pallet-balances 28.0.0", + "cumulus-pallet-xcm", + "cumulus-primitives-core", + "enumflags2", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-message-queue", + "pallet-uniques", + "pallet-xcm", "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", "scale-info", + "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] -name = "pallet-preimage" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "407828bc48c6193ac076fdf909b2fadcaaecd65f42b0b0a04afe22fe8e563834" +name = "pallet-parameters" 
+version = "0.1.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", + "docify", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-example-basic", "parity-scale-codec", + "paste", "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", + "serde", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] -name = "pallet-proxy" +name = "pallet-preimage" version = "28.0.0" dependencies = [ - "pallet-balances 28.0.0", - "pallet-utility 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] name = "pallet-proxy" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39df395f0dbcf07dafe842916adea3266a87ce36ed87b5132184b6bcd746393" +version = "28.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "pallet-balances", + "pallet-utility", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", ] [[package]] name = "pallet-ranked-collective" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", "parity-scale-codec", @@ -14776,33 +12693,14 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-ranked-collective" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2b38708feaed202debf1ac6beffaa5e20c99a9825c5ca0991753c2d4eaaf3ac" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "scale-info", - 
"sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-recovery" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -14810,33 +12708,18 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-recovery" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406a116aa6d05f88f3c10d79ff89cf577323680a48abd8e5550efb47317e67fa" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-referenda" version = "28.0.0" dependencies = [ "assert_matches", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-preimage 28.0.0", - "pallet-scheduler 29.0.0", + "pallet-balances", + "pallet-preimage", + "pallet-scheduler", "parity-scale-codec", "scale-info", "serde", @@ -14846,31 +12729,13 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-referenda" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3008c20531d1730c9b457ae77ecf0e3c9b07aaf8c4f5d798d61ef6f0b9e2d4b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-arithmetic 26.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-remark" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", 
+ "frame-system", "parity-scale-codec", "scale-info", "serde", @@ -14879,54 +12744,41 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-remark" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e8cae0e20888065ec73dda417325c6ecabf797f4002329484b59c25ecc34d4" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-revive" version = "0.1.0" dependencies = [ "array-bytes", "assert_matches", - "derive_more 0.99.17", + "bitflags 1.3.2", + "derive_more", "environmental", - "ethereum-types 0.15.1", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "ethereum-types", + "frame-benchmarking", + "frame-support", + "frame-system", "hex", "hex-literal", "impl-trait-for-tuples", - "log", - "pallet-balances 28.0.0", - "pallet-proxy 28.0.0", - "pallet-revive-fixtures 0.1.0", - "pallet-revive-proc-macro 0.1.0", - "pallet-revive-uapi 0.1.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-utility 28.0.0", + "jsonrpsee 0.24.3", + "log", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-proxy", + "pallet-revive-fixtures", + "pallet-revive-proc-macro", + "pallet-revive-uapi", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "paste", - "polkavm 0.18.0", + "polkavm 0.13.0", "pretty_assertions", "rlp 0.6.1", "scale-info", - "secp256k1 0.28.2", + "secp256k1", "serde", "serde_json", "sp-api 26.0.0", @@ -14937,42 +12789,12 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", + "sp-weights 27.0.0", + "staging-xcm", + "staging-xcm-builder", "subxt-signer", ] -[[package]] -name = "pallet-revive" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "be02c94dcbadd206a910a244ec19b493aac793eed95e23d37d6699547234569f" -dependencies = [ - "bitflags 1.3.2", - "environmental", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-balances 39.0.0", - "pallet-revive-fixtures 0.2.0", - "pallet-revive-proc-macro 0.1.1", - "pallet-revive-uapi 0.1.1", - "parity-scale-codec", - "paste", - "polkavm 0.10.0", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", -] - [[package]] name = "pallet-revive-eth-rpc" version = "0.1.0" @@ -14980,23 +12802,26 @@ dependencies = [ "anyhow", "clap 4.5.13", "env_logger 0.11.3", - "ethabi", "futures", "hex", - "jsonrpsee", + "hex-literal", + "jsonrpsee 0.24.3", "log", - "pallet-revive 0.1.0", - "pallet-revive-fixtures 0.1.0", + "pallet-revive", + "pallet-revive-fixtures", "parity-scale-codec", "rlp 0.6.1", "sc-cli", "sc-rpc", "sc-rpc-api", "sc-service", + "scale-info", + "secp256k1", + "serde_json", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", + "sp-runtime 31.0.1", "sp-weights 27.0.0", - "static_init", "substrate-cli-test-utils", "substrate-prometheus-endpoint", "subxt", @@ -15010,107 +12835,56 @@ name = "pallet-revive-fixtures" version = "0.1.0" dependencies = [ "anyhow", - "polkavm-linker 0.18.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "toml 0.8.12", -] - -[[package]] -name = "pallet-revive-fixtures" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a38c27f1531f36e5327f3084eb24cf1c9dd46b372e030c0169e843ce363105e" -dependencies = [ - "anyhow", - "frame-system 38.0.0", + "frame-system", + "log", "parity-wasm", - "polkavm-linker 0.10.0", - "sp-runtime 39.0.2", - "tempfile", - "toml 0.8.12", -] - -[[package]] -name = 
"pallet-revive-mock-network" -version = "0.1.0" -dependencies = [ - "assert_matches", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-revive 0.1.0", - "pallet-revive-fixtures 0.1.0", - "pallet-revive-uapi 0.1.0", - "pallet-timestamp 27.0.0", - "pallet-xcm 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "pretty_assertions", - "scale-info", + "polkavm-linker 0.14.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "xcm-simulator 7.0.0", -] - -[[package]] -name = "pallet-revive-mock-network" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60e74591d44dbd78db02c8593f5caa75bd61bcc4d63999302150223fb969ae37" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-assets 40.0.0", - "pallet-balances 39.0.0", - "pallet-message-queue 41.0.1", - "pallet-proxy 38.0.0", - "pallet-revive 0.2.0", - "pallet-revive-proc-macro 0.1.1", - "pallet-revive-uapi 0.1.1", - "pallet-timestamp 37.0.0", - "pallet-utility 38.0.0", - "pallet-xcm 17.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-parachains 17.0.1", - "scale-info", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "xcm-simulator 17.0.0", + "tempfile", + "toml 0.8.12", ] [[package]] -name = "pallet-revive-proc-macro" +name = "pallet-revive-mock-network" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "assert_matches", + "frame-support", + "frame-system", + 
"pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-proxy", + "pallet-revive", + "pallet-revive-fixtures", + "pallet-revive-proc-macro", + "pallet-revive-uapi", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "pretty_assertions", + "scale-info", + "sp-api 26.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keystore 0.34.0", + "sp-runtime 31.0.1", + "sp-tracing 16.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] name = "pallet-revive-proc-macro" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc16d1f7cee6a1ee6e8cd710e16230d59fb4935316c1704cf770e4d2335f8d4" +version = "0.1.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -15120,25 +12894,11 @@ dependencies = [ [[package]] name = "pallet-revive-uapi" version = "0.1.0" -dependencies = [ - "bitflags 1.3.2", - "pallet-revive-proc-macro 0.1.0", - "parity-scale-codec", - "paste", - "polkavm-derive 0.18.0", - "scale-info", -] - -[[package]] -name = "pallet-revive-uapi" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecb4686c8415619cc13e43fadef146ffff46424d9b4d037fe4c069de52708aac" dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.10.0", + "polkavm-derive 0.14.0", "scale-info", ] @@ -15146,45 +12906,29 @@ dependencies = [ name = "pallet-root-offences" version = "25.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", 
"parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-std 14.0.0", ] -[[package]] -name = "pallet-root-offences" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35774b830928daaeeca7196cead7c56eeed952a6616ad6dc5ec068d8c85c81a" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-session 38.0.0", - "pallet-staking 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - [[package]] name = "pallet-root-testing" version = "4.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -15192,32 +12936,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-root-testing" -version = "14.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be95e7c320ac1d381715364cd721e67ab3152ab727f8e4defd3a92e41ebbc880" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-safe-mode" version = "9.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-proxy 28.0.0", - "pallet-utility 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-proxy", + "pallet-utility", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -15226,34 +12955,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-safe-mode" -version = "19.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3e67dd4644c168cedbf257ac3dd2527aad81acf4a0d413112197094e549f76" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - 
"frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-balances 39.0.0", - "pallet-proxy 38.0.0", - "pallet-utility 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-salary" version = "13.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-ranked-collective 28.0.0", + "pallet-ranked-collective", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -15262,33 +12972,14 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-salary" -version = "23.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0544a71dba06a9a29da0778ba8cb37728c3b9a8377ac9737c4b1bc48c618bc2f" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-ranked-collective 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-sassafras" version = "0.3.5-dev" dependencies = [ "array-bytes", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -15304,11 +12995,11 @@ name = "pallet-scheduler" version = "29.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-preimage 28.0.0", + "pallet-preimage", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -15318,31 +13009,13 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "pallet-scheduler" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"26899a331e7ab5f7d5966cbf203e1cf5bd99cd110356d7ddcaa7597087cdc0b5" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", -] - [[package]] name = "pallet-scored-pool" version = "28.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-support", + "frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -15350,135 +13023,69 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-scored-pool" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f84b48bb4702712c902f43931c4077d3a1cb6773c8d8c290d4a6251f6bc2a5c" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-session" version = "28.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-state-machine 0.35.0", "sp-trie 29.0.0", ] -[[package]] -name = "pallet-session" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8474b62b6b7622f891e83d922a589e2ad5be5471f5ca47d45831a797dba0b3f4" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-timestamp 37.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-staking 36.0.0", - "sp-state-machine 0.43.0", - "sp-trie 37.0.0", -] - 
[[package]] name = "pallet-session-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "rand", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", -] - -[[package]] -name = "pallet-session-benchmarking" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aadce7df0fee981721983795919642648b846dab5ab9096f82c2cea781007d0" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-session 38.0.0", - "pallet-staking 38.0.0", - "parity-scale-codec", - "rand", - "sp-runtime 39.0.2", - "sp-session 36.0.0", + "sp-session", ] [[package]] name = "pallet-skip-feeless-payment" version = "3.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-skip-feeless-payment" -version = "13.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c2cb0dae13d2c2d2e76373f337d408468f571459df1900cbd7458f21cf6c01" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-society" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", + "frame-benchmarking", + "frame-support", "frame-support-test", - "frame-system 28.0.0", + "frame-system", "log", - "pallet-balances 28.0.0", + 
"pallet-balances", "parity-scale-codec", "rand_chacha", "scale-info", @@ -15489,39 +13096,21 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-society" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1dc69fea8a8de343e71691f009d5fece6ae302ed82b7bb357882b2ea6454143" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "rand_chacha", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-staking" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", "log", - "pallet-authorship 28.0.0", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-session 28.0.0", + "pallet-authorship", + "pallet-bags-list", + "pallet-balances", + "pallet-session", "pallet-staking-reward-curve", - "pallet-timestamp 27.0.0", + "pallet-timestamp", "parity-scale-codec", "rand_chacha", "scale-info", @@ -15529,35 +13118,13 @@ dependencies = [ "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", "substrate-test-utils", ] -[[package]] -name = "pallet-staking" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c870d123f4f053b56af808a4beae1ffc4309a696e829796c26837936c926db3b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-authorship 38.0.0", - "pallet-session 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-application-crypto 38.0.0", - "sp-io 38.0.0", - 
"sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - [[package]] name = "pallet-staking-reward-curve" version = "11.0.0" @@ -15577,46 +13144,25 @@ dependencies = [ "sp-arithmetic 23.0.0", ] -[[package]] -name = "pallet-staking-reward-fn" -version = "22.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "988a7ebeacc84d4bdb0b12409681e956ffe35438447d8f8bc78db547cffb6ebc" -dependencies = [ - "log", - "sp-arithmetic 26.0.0", -] - [[package]] name = "pallet-staking-runtime-api" version = "14.0.0" dependencies = [ "parity-scale-codec", "sp-api 26.0.0", - "sp-staking 26.0.0", -] - -[[package]] -name = "pallet-staking-runtime-api" -version = "24.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7298559ef3a6b2f5dfbe9a3b8f3d22f2ff9b073c97f4c4853d2b316d973e72d" -dependencies = [ - "parity-scale-codec", - "sp-api 34.0.0", - "sp-staking 36.0.0", + "sp-staking", ] [[package]] name = "pallet-state-trie-migration" version = "29.0.0" dependencies = [ - "frame-benchmarking 28.0.0", + "frame-benchmarking", "frame-remote-externalities", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "parking_lot 0.12.3", "scale-info", @@ -15631,188 +13177,98 @@ dependencies = [ "zstd 0.12.4", ] -[[package]] -name = "pallet-state-trie-migration" -version = "40.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138c15b4200b9dc4c3e031def6a865a235cdc76ff91ee96fba19ca1787c9dda6" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-statement" version = "10.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + 
"pallet-balances", "parity-scale-codec", "scale-info", "sp-api 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-statement-store 10.0.0", -] - -[[package]] -name = "pallet-statement" -version = "20.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e03e147efa900e75cd106337f36da3d7dcd185bd9e5f5c3df474c08c3c37d16" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-statement-store 18.0.0", -] - -[[package]] -name = "pallet-sudo" -version = "28.0.0" -dependencies = [ - "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", + "sp-statement-store", ] [[package]] name = "pallet-sudo" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1574fe2aed3d52db4a389b77b53d8c9758257b121e3e7bbe24c4904e11681e0e" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - -[[package]] -name = "pallet-template" -version = "0.0.0" -dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", -] - -[[package]] -name = "pallet-timestamp" -version = "27.0.0" +version = "28.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "log", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-storage 19.0.0", - "sp-timestamp 26.0.0", ] [[package]] -name 
= "pallet-timestamp" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ba9b71bbfd33ae672f23ba7efaeed2755fdac37b8f946cb7474fc37841b7e1" +name = "pallet-template" +version = "0.0.0" dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", + "frame-benchmarking", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-storage 21.0.0", - "sp-timestamp 34.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", ] [[package]] -name = "pallet-tips" +name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "docify", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-treasury 27.0.0", "parity-scale-codec", "scale-info", - "serde", "sp-core 28.0.0", + "sp-inherents", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-storage 19.0.0", + "sp-timestamp", ] [[package]] name = "pallet-tips" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa1d4371a70c309ba11624933f8f5262fe4edad0149c556361d31f26190da936" +version = "27.0.0" dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-treasury 37.0.0", + "pallet-balances", + "pallet-treasury", "parity-scale-codec", "scale-info", "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-storage 19.0.0", ] [[package]] name = "pallet-transaction-payment" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + 
"frame-system", + "pallet-balances", "parity-scale-codec", "scale-info", "serde", @@ -15822,28 +13278,12 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-transaction-payment" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b1aa3498107a30237f941b0f02180db3b79012c3488878ff01a4ac3e8ee04e" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-transaction-payment-rpc" version = "30.0.0" dependencies = [ - "jsonrpsee", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "jsonrpsee 0.24.3", + "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "sp-api 26.0.0", "sp-blockchain", @@ -15857,64 +13297,31 @@ dependencies = [ name = "pallet-transaction-payment-rpc-runtime-api" version = "28.0.0" dependencies = [ - "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment", "parity-scale-codec", "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", ] -[[package]] -name = "pallet-transaction-payment-rpc-runtime-api" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fdf5ab71e9dbcadcf7139736b6ea6bac8ec4a83985d46cbd130e1eec770e41" -dependencies = [ - "pallet-transaction-payment 38.0.0", - "parity-scale-codec", - "sp-api 34.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", -] - [[package]] name = "pallet-transaction-storage" version = "27.0.0" dependencies = [ "array-bytes", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-transaction-storage-proof 26.0.0", -] - -[[package]] 
-name = "pallet-transaction-storage" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c337a972a6a796c0a0acc6c03b5e02901c43ad721ce79eb87b45717d75c93b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-transaction-storage-proof 34.0.0", + "sp-transaction-storage-proof", ] [[package]] @@ -15922,13 +13329,13 @@ name = "pallet-treasury" version = "27.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-balances 28.0.0", - "pallet-utility 28.0.0", + "pallet-balances", + "pallet-utility", "parity-scale-codec", "scale-info", "serde", @@ -15937,36 +13344,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-treasury" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98bfdd3bb9b58fb010bcd419ff5bf940817a8e404cdbf7886a53ac730f5dda2b" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-tx-pause" version = "9.0.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-proxy 28.0.0", - "pallet-utility 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-proxy", + "pallet-utility", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -15974,33 +13362,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = 
"pallet-tx-pause" -version = "19.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee153f5be5efc84ebd53aa581e5361cde17dc3669ef80d8ad327f4041d89ebe" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-balances 39.0.0", - "pallet-proxy 38.0.0", - "pallet-utility 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-uniques" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -16009,32 +13379,17 @@ dependencies = [ "sp-std 14.0.0", ] -[[package]] -name = "pallet-uniques" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2b13cdaedf2d5bd913a5f6e637cb52b5973d8ed4b8d45e56d921bc4d627006f" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-utility" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-collective 28.0.0", - "pallet-root-testing 4.0.0", - "pallet-timestamp 27.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-collective", + "pallet-root-testing", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -16042,33 +13397,17 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-utility" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fdcade6efc0b66fc7fc4138964802c02d0ffb7380d894e26b9dd5073727d2b3" -dependencies = [ - "frame-benchmarking 
38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-verify-signature" version = "1.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-collective 28.0.0", - "pallet-root-testing 4.0.0", - "pallet-timestamp 27.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-collective", + "pallet-root-testing", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -16081,11 +13420,11 @@ dependencies = [ name = "pallet-vesting" version = "28.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -16093,30 +13432,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-vesting" -version = "38.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "807df2ef13ab6bf940879352c3013bfa00b670458b4c125c2f60e5753f68e3d5" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-whitelist" version = "27.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-preimage 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-preimage", "parity-scale-codec", "scale-info", "sp-api 26.0.0", @@ -16125,170 +13449,88 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "pallet-whitelist" -version = "37.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef17df925290865cf37096dd0cb76f787df11805bba01b1d0ca3e106d06280b" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "pallet-xcm" version = "7.0.0" dependencies = [ "bounded-collections", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", "scale-info", "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "tracing", - "xcm-runtime-apis 0.1.0", -] - -[[package]] -name = "pallet-xcm" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1760b6589e53f4ad82216c72c0e38fcb4df149c37224ab3301dc240c85d1d4" -dependencies = [ - "bounded-collections", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "xcm-runtime-apis 0.4.0", + "xcm-runtime-apis", ] [[package]] name = "pallet-xcm-benchmarks" version = "7.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - 
"pallet-xcm 7.0.0", + "pallet-assets", + "pallet-balances", + "pallet-xcm", "parity-scale-codec", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", + "polkadot-primitives", + "polkadot-runtime-common", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "pallet-xcm-benchmarks" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da423463933b42f4a4c74175f9e9295a439de26719579b894ce533926665e4a" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "pallet-xcm-bridge-hub" version = "0.2.0" dependencies = [ - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-runtime 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "bp-xcm-bridge-hub-router 0.6.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-header-chain", + "bp-messages", + "bp-runtime", + "bp-xcm-bridge-hub", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-bridge-messages 0.7.0", - "pallet-xcm-bridge-hub-router 0.5.0", + "pallet-balances", + "pallet-bridge-messages", + "pallet-xcm-bridge-hub-router", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", + "polkadot-parachain-primitives", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "pallet-xcm-bridge-hub" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f9670065b7cba92771060a4a3925b6650ff67611443ccfccd5aa356f7d5aac" 
-dependencies = [ - "bp-messages 0.18.0", - "bp-runtime 0.18.0", - "bp-xcm-bridge-hub 0.4.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-bridge-messages 0.18.0", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "pallet-xcm-bridge-hub-router" version = "0.5.0" dependencies = [ - "bp-xcm-bridge-hub-router 0.6.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "bp-xcm-bridge-hub-router", + "frame-benchmarking", + "frame-support", + "frame-system", "log", "parity-scale-codec", "scale-info", @@ -16296,43 +13538,42 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", + "staging-xcm", + "staging-xcm-builder", ] [[package]] -name = "pallet-xcm-bridge-hub-router" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3b5347c826b721098ef39afb0d750e621c77538044fc1e865af1a8747824fdf" +name = "parachain-template-node" +version = "0.0.0" dependencies = [ - "bp-xcm-bridge-hub-router 0.14.1", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", + "clap 4.5.13", + "color-print", + "docify", + "futures", + "jsonrpsee 0.24.3", "log", + "parachain-template-runtime", "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", + "polkadot-sdk", + "sc-tracing", + "serde", + "serde_json", + "substrate-prometheus-endpoint", ] [[package]] -name = "parachain-template-node" +name = "parachain-template-node-two" version = "0.0.0" dependencies = [ "clap 
4.5.13", "color-print", "docify", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", - "parachain-template-runtime", + "parachain-template-runtime-two", "parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "sc-tracing", "serde", "serde_json", @@ -16362,98 +13603,88 @@ dependencies = [ name = "parachain-template-runtime" version = "0.0.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-parachain-system", "docify", "hex-literal", "log", "pallet-nfts 22.0.0", + "pallet-nfts", "pallet-parachain-template", "pallet-parachain-xcnft", "parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk 0.1.0", + "scale-info", + "serde_json", + "smallvec", + "substrate-wasm-builder 17.0.0", +] + +[[package]] +name = "parachain-template-runtime-two" +version = "0.0.0" +dependencies = [ + "cumulus-pallet-parachain-system 0.7.0", + "docify", + "hex-literal", + "log", + "pallet-parachain-template-two", + "pallet-parachain-xcnft-two", + "pallet-uniques 28.0.0", + "pallet-parachain-xcnft", + "parity-scale-codec", + "polkadot-sdk", "scale-info", "serde_json", "smallvec", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] name = "parachain-template-runtime-two" version = "0.0.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-parachain-system", "docify", "hex-literal", "log", "pallet-parachain-template-two", "pallet-parachain-xcnft-two", - "pallet-uniques 28.0.0", + "pallet-uniques", "parity-scale-codec", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "scale-info", "serde_json", "smallvec", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] name = "parachains-common" version = "7.0.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-utility 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-support", + "frame-system", "log", - "pallet-asset-tx-payment 28.0.0", - 
"pallet-assets 29.1.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-xcm", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "scale-info", - "sp-consensus-aura 0.32.0", + "sp-consensus-aura", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", -] - -[[package]] -name = "parachains-common" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9460a69f409be27c62161d8b4d36ffc32735d09a4f9097f9c789db0cca7196c" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "cumulus-primitives-utility 0.17.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-asset-tx-payment 38.0.0", - "pallet-assets 40.0.0", - "pallet-authorship 38.0.0", - "pallet-balances 39.0.0", - "pallet-collator-selection 19.0.0", - "pallet-message-queue 41.0.1", - "pallet-xcm 17.0.0", - "parity-scale-codec", - "polkadot-primitives 16.0.0", - "scale-info", - "sp-consensus-aura 0.40.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "staging-parachain-info 0.17.0", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", - "substrate-wasm-builder 24.0.1", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-executor", + "substrate-wasm-builder", ] [[package]] @@ -16462,7 +13693,7 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-polkadot-core 0.7.0", + "bp-polkadot-core", "futures", "log", "parity-scale-codec", @@ -16475,63 +13706,30 @@ dependencies = [ name = "parachains-runtimes-test-utils" version = "7.0.0" dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", - 
"cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", - "cumulus-test-relay-sproof-builder 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-test-relay-sproof-builder", + "frame-support", + "frame-system", "hex-literal", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", + "pallet-balances", + "pallet-collator-selection", + "pallet-session", + "pallet-timestamp", + "pallet-xcm", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "sp-consensus-aura 0.32.0", + "polkadot-parachain-primitives", + "sp-consensus-aura", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "xcm-runtime-apis 0.1.0", -] - -[[package]] -name = "parachains-runtimes-test-utils" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287d2db0a2d19466caa579a69f021bfdc6fa352f382c8395dade58d1d0c6adfe" -dependencies = [ - "cumulus-pallet-parachain-system 0.17.1", - "cumulus-pallet-xcmp-queue 0.17.0", - "cumulus-primitives-core 0.16.0", - "cumulus-primitives-parachain-inherent 0.16.0", - "cumulus-test-relay-sproof-builder 0.16.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-balances 39.0.0", - "pallet-collator-selection 19.0.0", - "pallet-session 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-xcm 17.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "sp-consensus-aura 0.40.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-tracing 17.0.1", - "staging-parachain-info 0.17.0", - "staging-xcm 14.2.0", - 
"staging-xcm-executor 17.0.0", - "substrate-wasm-builder 24.0.1", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-executor", + "substrate-wasm-builder", ] [[package]] @@ -16600,35 +13798,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-util-mem" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" -dependencies = [ - "cfg-if", - "ethereum-types 0.14.1", - "hashbrown 0.12.3", - "impl-trait-for-tuples", - "lru 0.8.1", - "parity-util-mem-derive", - "parking_lot 0.12.3", - "primitive-types 0.12.2", - "smallvec", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2 1.0.86", - "syn 1.0.109", - "synstructure 0.12.6", -] - [[package]] name = "parity-wasm" version = "0.45.0" @@ -16719,7 +13888,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", - "hmac 0.12.1", "password-hash", ] @@ -16752,213 +13920,211 @@ dependencies = [ name = "penpal-emulated-chain" version = "0.0.0" dependencies = [ - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "penpal-runtime", "sp-core 28.0.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", + "sp-keyring", + "staging-xcm", ] [[package]] name = "penpal-runtime" version = "0.14.0" dependencies = [ - "assets-common 0.7.0", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - 
"cumulus-primitives-core 0.7.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "assets-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-asset-conversion 10.0.0", - "pallet-asset-tx-payment 28.0.0", - "pallet-assets 29.1.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-session 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-asset-conversion", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-common", "primitive-types 0.12.2", "scale-info", "smallvec", - "snowbridge-router-primitives 0.9.0", + "snowbridge-router-primitives", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - 
"sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "xcm-runtime-apis", ] [[package]] name = "people-rococo-emulated-chain" version = "0.1.0" dependencies = [ - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "people-rococo-runtime", "sp-core 28.0.0", - "testnet-parachains-constants 1.0.0", + "testnet-parachains-constants", ] [[package]] name = "people-rococo-integration-tests" version = "0.1.0" dependencies = [ - "asset-test-utils 7.0.0", + "asset-test-utils", "emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-balances 28.0.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "frame-support", + "pallet-balances", + "pallet-identity", + "pallet-message-queue", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "people-rococo-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - 
"cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", "enumflags2", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "pallet-migrations 1.0.0", - "pallet-multisig 28.0.0", - "pallet-proxy 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", - "rococo-runtime-constants 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-identity", + "pallet-message-queue", + "pallet-migrations", + "pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-timestamp", + 
"pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", "scale-info", "serde", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "xcm-runtime-apis", ] [[package]] name = "people-westend-emulated-chain" version = "0.1.0" dependencies = [ - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "emulated-integration-tests-common", - "frame-support 28.0.0", - "parachains-common 7.0.0", + "frame-support", + "parachains-common", "people-westend-runtime", "sp-core 28.0.0", - "testnet-parachains-constants 1.0.0", + "testnet-parachains-constants", ] [[package]] name = "people-westend-integration-tests" version = "0.1.0" dependencies = [ - "asset-test-utils 7.0.0", + "asset-test-utils", "emulated-integration-tests-common", - "frame-support 28.0.0", - "pallet-balances 28.0.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-runtime-common 7.0.0", - "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - 
"staging-xcm-executor 7.0.0", - "westend-runtime", - "westend-runtime-constants 7.0.0", + "frame-support", + "pallet-balances", + "pallet-identity", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "sp-runtime 31.0.1", + "staging-xcm", + "staging-xcm-executor", + "westend-runtime-constants", "westend-system-emulated-network", ] @@ -16966,68 +14132,67 @@ dependencies = [ name = "people-westend-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", "enumflags2", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-identity 29.0.0", - "pallet-message-queue 31.0.0", - "pallet-migrations 1.0.0", - "pallet-multisig 28.0.0", - "pallet-proxy 28.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", 
- "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parachains-common 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-identity", + "pallet-message-queue", + "pallet-migrations", + "pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "serde", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] @@ -17087,23 +14252,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.7.0", + "indexmap 2.2.3", ] [[package]] name = "pin-project" -version = "1.1.7" +version = 
"1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -17178,9 +14343,9 @@ version = "6.0.0" dependencies = [ "assert_cmd", "color-eyre", - "nix 0.29.0", + "nix 0.28.0", "polkadot-cli", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", @@ -17209,7 +14374,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rand", "rand_chacha", @@ -17217,7 +14382,7 @@ dependencies = [ "sc-keystore", "schnorrkel 0.11.4", "sp-application-crypto 30.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-core 28.0.0", "sp-tracing 16.0.0", "tracing-gum", @@ -17237,13 +14402,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "rand", "rand_chacha", "sp-application-crypto 30.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "tracing-gum", @@ -17254,7 +14419,7 @@ name = "polkadot-availability-distribution" version = "7.0.0" dependencies = [ "assert_matches", - "derive_more 0.99.17", + 
"derive_more", "fatality", "futures", "futures-timer", @@ -17265,7 +14430,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -17273,7 +14438,7 @@ dependencies = [ "sc-network", "schnellru", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -17297,7 +14462,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -17306,7 +14471,7 @@ dependencies = [ "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-tracing 16.0.0", "thiserror", "tokio", @@ -17345,7 +14510,7 @@ dependencies = [ "sc-tracing", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-maybe-compressed-blob 11.0.0", "sp-runtime 31.0.1", "substrate-build-script-utils", @@ -17367,14 +14532,14 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rstest", "sc-keystore", "sc-network", "schnellru", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -17393,18 +14558,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "polkadot-core-primitives" -version = "15.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2900d3b857e34c480101618a950c3a4fbcddc8c0d50573d48553376185908b8" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = 
"polkadot-dispute-distribution" version = "7.0.0" @@ -17412,11 +14565,11 @@ dependencies = [ "assert_matches", "async-channel 1.9.0", "async-trait", - "derive_more 0.99.17", + "derive_more", "fatality", "futures", "futures-timer", - "indexmap 2.7.0", + "indexmap 2.2.3", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -17424,13 +14577,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "sc-keystore", "sc-network", "schnellru", "sp-application-crypto 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -17444,7 +14597,7 @@ dependencies = [ "criterion", "parity-scale-codec", "polkadot-node-primitives", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "quickcheck", "reed-solomon-novelpoly", "sp-core 28.0.0", @@ -17465,18 +14618,18 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "quickcheck", "rand", "rand_chacha", "sc-network", "sc-network-common", "sp-application-crypto 30.0.0", - "sp-authority-discovery 26.0.0", - "sp-consensus-babe 0.32.0", + "sp-authority-discovery", + "sp-consensus-babe", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "tracing-gum", @@ -17501,12 +14654,12 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "sc-network", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "thiserror", "tracing-gum", ] @@ -17523,12 +14676,12 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", 
"polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rstest", "schnellru", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-maybe-compressed-blob 11.0.0", "thiserror", "tracing-gum", @@ -17541,7 +14694,7 @@ dependencies = [ "assert_matches", "async-trait", "bitvec", - "derive_more 0.99.17", + "derive_more", "futures", "futures-timer", "itertools 0.11.0", @@ -17556,7 +14709,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -17567,10 +14720,10 @@ dependencies = [ "schnorrkel 0.11.4", "sp-application-crypto 30.0.0", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-babe", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -17599,7 +14752,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -17609,10 +14762,10 @@ dependencies = [ "schnorrkel 0.11.4", "sp-application-crypto 30.0.0", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-babe", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -17639,11 +14792,11 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-tracing 16.0.0", "thiserror", "tracing-gum", @@ 
-17662,8 +14815,8 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-statement-table", "rstest", @@ -17671,7 +14824,7 @@ dependencies = [ "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -17686,7 +14839,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "sp-keystore 0.34.0", "thiserror", @@ -17710,13 +14863,13 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rstest", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-maybe-compressed-blob 11.0.0", "tracing-gum", @@ -17734,7 +14887,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sc-consensus-babe", "sp-blockchain", @@ -17757,7 +14910,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sp-core 28.0.0", "thiserror", "tracing-gum", @@ -17778,13 +14931,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", 
"polkadot-primitives-test-helpers", "sc-keystore", "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -17800,9 +14953,9 @@ dependencies = [ "futures-timer", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sp-blockchain", - "sp-inherents 26.0.0", + "sp-inherents", "thiserror", "tracing-gum", ] @@ -17817,7 +14970,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rand", "rstest", @@ -17839,7 +14992,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "rstest", "schnellru", @@ -17866,7 +15019,7 @@ dependencies = [ "libc", "parity-scale-codec", "pin-project", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", @@ -17875,14 +15028,13 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "procfs", "rand", "rococo-runtime", "rusty-fork", "sc-sysinfo", - "sc-tracing", "slotmap", "sp-core 28.0.0", "sp-maybe-compressed-blob 11.0.0", @@ -17906,12 +15058,12 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "sc-keystore", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 
31.0.1", "thiserror", @@ -17927,10 +15079,10 @@ dependencies = [ "futures", "landlock", "libc", - "nix 0.29.0", + "nix 0.28.0", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "sc-executor 0.32.0", "sc-executor-common 0.29.0", "sc-executor-wasmtime 0.29.0", @@ -17952,12 +15104,12 @@ dependencies = [ "cfg-if", "cpu-time", "libc", - "nix 0.29.0", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-node-primitives", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "sp-maybe-compressed-blob 11.0.0", "tracing-gum", ] @@ -17970,11 +15122,11 @@ dependencies = [ "cfg-if", "criterion", "libc", - "nix 0.29.0", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-node-primitives", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "rayon", "rococo-runtime", "sc-executor-common 0.29.0", @@ -17997,13 +15149,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "schnellru", "sp-api 26.0.0", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "tracing-gum", ] @@ -18020,14 +15172,14 @@ dependencies = [ "hyper-util", "log", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-test-service", "prioritized-metered-channel", "prometheus-parse", "sc-cli", "sc-service", "sc-tracing", - "sp-keyring 31.0.0", + "sp-keyring", "substrate-prometheus-endpoint", "substrate-test-utils", "tempfile", @@ -18042,13 +15194,13 @@ dependencies = [ "async-channel 1.9.0", "async-trait", "bitvec", - "derive_more 0.99.17", + "derive_more", "fatality", "futures", "hex", "parity-scale-codec", 
"polkadot-node-primitives", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "rand", "rand_chacha", "sc-authority-discovery", @@ -18070,14 +15222,14 @@ dependencies = [ "futures-timer", "parity-scale-codec", "polkadot-erasure-coding", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "sc-keystore", "schnorrkel 0.11.4", "serde", "sp-application-crypto 30.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-babe", + "sp-consensus-slots", "sp-core 28.0.0", "sp-keystore 0.34.0", "sp-maybe-compressed-blob 11.0.0", @@ -18105,13 +15257,13 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-client-api", "sc-keystore", "sc-utils", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", ] @@ -18121,13 +15273,13 @@ version = "7.0.0" dependencies = [ "async-trait", "bitvec", - "derive_more 0.99.17", + "derive_more", "fatality", "futures", "orchestra", "polkadot-node-network-protocol", "polkadot-node-primitives", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-statement-table", "sc-client-api", "sc-network", @@ -18135,9 +15287,9 @@ dependencies = [ "sc-transaction-pool-api", "smallvec", "sp-api 26.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-blockchain", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "thiserror", @@ -18149,7 +15301,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "derive_more 0.99.17", + "derive_more", "fatality", "futures", "futures-channel", @@ -18170,7 +15322,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", 
"polkadot-primitives-test-helpers", "prioritized-metered-channel", "rand", @@ -18209,28 +15361,27 @@ dependencies = [ "cumulus-client-consensus-relay-chain", "cumulus-client-parachain-inherent", "cumulus-client-service", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-aura", + "cumulus-primitives-core", "cumulus-relay-chain-interface", - "cumulus-test-runtime", "docify", - "frame-benchmarking 28.0.0", + "frame-benchmarking", "frame-benchmarking-cli", - "frame-support 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-support", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", - "nix 0.29.0", - "pallet-transaction-payment 28.0.0", + "nix 0.28.0", + "pallet-transaction-payment", "pallet-transaction-payment-rpc", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "parachains-common 7.0.0", + "pallet-transaction-payment-rpc-runtime-api", + "parachains-common", "parity-scale-codec", "polkadot-cli", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-basic-authorship", "sc-chain-spec", "sc-cli", @@ -18241,35 +15392,29 @@ dependencies = [ "sc-executor 0.32.0", "sc-network", "sc-rpc", - "sc-runtime-utilities", "sc-service", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", - "scale-info", "serde", "serde_json", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-crypto-hashing 0.1.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-storage 19.0.0", - "sp-timestamp 26.0.0", - "sp-transaction-pool 26.0.0", + "sp-session", + "sp-timestamp", + "sp-transaction-pool", "sp-version 29.0.0", "sp-weights 27.0.0", "substrate-frame-rpc-system", 
"substrate-prometheus-endpoint", "substrate-state-trie-migration-rpc", - "subxt-metadata", "tokio", "wait-timeout", ] @@ -18290,7 +15435,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "prioritized-metered-channel", "sc-client-api", @@ -18313,11 +15458,11 @@ dependencies = [ "contracts-rococo-runtime", "coretime-rococo-runtime", "coretime-westend-runtime", - "cumulus-primitives-core 0.7.0", + "cumulus-primitives-core", "glutton-westend-runtime", "hex-literal", "log", - "parachains-common 7.0.0", + "parachains-common", "penpal-runtime", "people-rococo-runtime", "people-westend-runtime", @@ -18329,9 +15474,9 @@ dependencies = [ "serde", "serde_json", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-keyring 31.0.0", - "staging-xcm 7.0.0", + "sp-genesis-builder", + "sp-keyring", + "staging-xcm", "substrate-build-script-utils", ] @@ -18340,9 +15485,9 @@ name = "polkadot-parachain-primitives" version = "6.0.0" dependencies = [ "bounded-collections", - "derive_more 0.99.17", + "derive_more", "parity-scale-codec", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "scale-info", "serde", "sp-core 28.0.0", @@ -18350,23 +15495,6 @@ dependencies = [ "sp-weights 27.0.0", ] -[[package]] -name = "polkadot-parachain-primitives" -version = "14.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52b5648a2e8ce1f9a0f8c41c38def670cefd91932cd793468e1a5b0b0b4e4af1" -dependencies = [ - "bounded-collections", - "derive_more 0.99.17", - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", -] - [[package]] name = "polkadot-primitives" version = "7.0.0" @@ -18375,89 +15503,35 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "polkadot-core-primitives 7.0.0", - 
"polkadot-parachain-primitives 6.0.0", + "polkadot-core-primitives", + "polkadot-parachain-primitives", "polkadot-primitives-test-helpers", "scale-info", "serde", "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", - "sp-consensus-slots 0.32.0", + "sp-authority-discovery", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", + "sp-staking", "sp-std 14.0.0", "thiserror", ] -[[package]] -name = "polkadot-primitives" -version = "15.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57bc055fa389372ec5fc0001b99aeffd50f3fd379280ce572d935189bb58dd8" -dependencies = [ - "bitvec", - "hex-literal", - "log", - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-arithmetic 26.0.0", - "sp-authority-discovery 34.0.0", - "sp-consensus-slots 0.40.1", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", - "sp-staking 34.0.0", -] - -[[package]] -name = "polkadot-primitives" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb20b75d33212150242d39890d7ededab55f1084160c337f15d0eb8ca8c3ad4" -dependencies = [ - "bitvec", - "hex-literal", - "log", - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-arithmetic 26.0.0", - "sp-authority-discovery 34.0.0", - "sp-consensus-slots 0.40.1", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", -] - [[package]] name = "polkadot-primitives-test-helpers" version = "1.0.0" dependencies = [ - 
"polkadot-primitives 7.0.0", + "polkadot-primitives", "rand", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", ] @@ -18465,10 +15539,10 @@ dependencies = [ name = "polkadot-rpc" version = "7.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "mmr-rpc", "pallet-transaction-payment-rpc", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -18484,11 +15558,11 @@ dependencies = [ "sc-transaction-pool-api", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "substrate-frame-rpc-system", @@ -18500,128 +15574,65 @@ name = "polkadot-runtime-common" version = "7.0.0" dependencies = [ "bitvec", - "frame-benchmarking 28.0.0", - "frame-election-provider-support 28.0.0", - "frame-support 28.0.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", "frame-support-test", - "frame-system 28.0.0", + "frame-system", "hex-literal", "impl-trait-for-tuples", "libsecp256k1", "log", - "pallet-asset-rate 7.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-election-provider-multi-phase 27.0.0", - "pallet-fast-unstake 27.0.0", - "pallet-identity 29.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", - "pallet-staking-reward-fn 19.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-treasury 27.0.0", - "pallet-vesting 28.0.0", - "parity-scale-codec", - "polkadot-primitives 7.0.0", + "pallet-asset-rate", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-broker", + "pallet-election-provider-multi-phase", + "pallet-fast-unstake", + "pallet-identity", + "pallet-session", + 
"pallet-staking", + "pallet-staking-reward-fn", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-treasury", + "pallet-vesting", + "parity-scale-codec", + "polkadot-primitives", "polkadot-primitives-test-helpers", - "polkadot-runtime-parachains 7.0.0", + "polkadot-runtime-parachains", "rustc-hex", "scale-info", "serde", "serde_derive", "serde_json", - "slot-range-helper 7.0.0", + "slot-range-helper", "sp-api 26.0.0", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "static_assertions", -] - -[[package]] -name = "polkadot-runtime-common" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15154ba5ca55d323fcf7af0f5dcd39d58dcb4dfac3d9b30404840a6d8bbde4" -dependencies = [ - "bitvec", - "frame-benchmarking 38.0.0", - "frame-election-provider-support 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "libsecp256k1", - "log", - "pallet-asset-rate 17.0.0", - "pallet-authorship 38.0.0", - "pallet-balances 39.0.0", - "pallet-broker 0.17.0", - "pallet-election-provider-multi-phase 37.0.0", - "pallet-fast-unstake 37.0.0", - "pallet-identity 38.0.0", - "pallet-session 38.0.0", - "pallet-staking 38.0.0", - "pallet-staking-reward-fn 22.0.0", - "pallet-timestamp 37.0.0", - "pallet-transaction-payment 38.0.0", - "pallet-treasury 37.0.0", - "pallet-vesting 38.0.0", - "parity-scale-codec", - "polkadot-primitives 16.0.0", - "polkadot-runtime-parachains 17.0.1", - "rustc-hex", - "scale-info", - "serde", - "serde_derive", - "slot-range-helper 15.0.0", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-npos-elections 34.0.0", - "sp-runtime 39.0.2", - 
"sp-session 36.0.0", - "sp-staking 36.0.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", + "sp-session", + "sp-staking", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "static_assertions", ] [[package]] -name = "polkadot-runtime-metrics" -version = "7.0.0" -dependencies = [ - "bs58", - "frame-benchmarking 28.0.0", - "parity-scale-codec", - "polkadot-primitives 7.0.0", - "sp-tracing 16.0.0", -] - -[[package]] -name = "polkadot-runtime-metrics" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c306f1ace7644a24de860479f92cf8d6467393bb0c9b0777c57e2d42c9d452a" +name = "polkadot-runtime-metrics" +version = "7.0.0" dependencies = [ "bs58", - "frame-benchmarking 38.0.0", + "frame-benchmarking", "parity-scale-codec", - "polkadot-primitives 16.0.0", - "sp-tracing 17.0.1", + "polkadot-primitives", + "sp-tracing 16.0.0", ] [[package]] @@ -18631,32 +15642,32 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "bitvec", - "derive_more 0.99.17", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", + "derive_more", + "frame-benchmarking", + "frame-support", "frame-support-test", - "frame-system 28.0.0", + "frame-system", "futures", "hex-literal", "impl-trait-for-tuples", "log", - "pallet-authority-discovery 28.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-message-queue 31.0.0", - "pallet-mmr 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-vesting 28.0.0", - "parity-scale-codec", - "polkadot-core-primitives 7.0.0", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-broker", + "pallet-message-queue", + "pallet-mmr", + "pallet-session", + "pallet-staking", + "pallet-timestamp", + "pallet-vesting", + 
"parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-primitives-test-helpers", - "polkadot-runtime-metrics 7.0.0", + "polkadot-runtime-metrics", "rand", "rand_chacha", "rstest", @@ -18669,90 +15680,41 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-executor", "static_assertions", "thousands", ] -[[package]] -name = "polkadot-runtime-parachains" -version = "17.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd58e3a17e5df678f5737b018cbfec603af2c93bec56bbb9f8fb8b2b017b54b1" -dependencies = [ - "bitflags 1.3.2", - "bitvec", - "derive_more 0.99.17", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-authority-discovery 38.0.0", - "pallet-authorship 38.0.0", - "pallet-babe 38.0.0", - "pallet-balances 39.0.0", - "pallet-broker 0.17.0", - "pallet-message-queue 41.0.1", - "pallet-mmr 38.0.0", - "pallet-session 38.0.0", - "pallet-staking 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-vesting 38.0.0", - "parity-scale-codec", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-metrics 17.0.0", - "rand", - "rand_chacha", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-staking 36.0.0", - "sp-std 14.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", -] - [[package]] name = "polkadot-sdk" version = "0.1.0" dependencies = [ - "asset-test-utils 7.0.0", - "assets-common 0.7.0", - "binary-merkle-tree 13.0.0", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot 0.5.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", - "bp-runtime 0.7.0", - "bp-test-utils 0.7.0", - "bp-xcm-bridge-hub 0.2.0", - "bp-xcm-bridge-hub-router 0.6.0", - "bridge-hub-common 0.1.0", - "bridge-hub-test-utils 0.7.0", - "bridge-runtime-common 0.7.0", + "asset-test-utils", + "assets-common", + "binary-merkle-tree", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", + "bp-test-utils", + "bp-xcm-bridge-hub", + "bp-xcm-bridge-hub-router", + "bridge-hub-common", + "bridge-hub-test-utils", + "bridge-runtime-common", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -18763,167 +15725,168 @@ dependencies = [ "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-dmp-queue 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-parachain-system-proc-macro 0.6.0", - "cumulus-pallet-session-benchmarking 9.0.0", - "cumulus-pallet-solo-to-para 0.7.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-ping 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-timestamp 0.7.0", - "cumulus-primitives-utility 0.7.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-parachain-system-proc-macro", + 
"cumulus-pallet-session-benchmarking", + "cumulus-pallet-solo-to-para", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-timestamp", + "cumulus-primitives-utility", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", - "cumulus-test-relay-sproof-builder 0.7.0", + "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", - "frame-benchmarking 28.0.0", + "frame-benchmarking", "frame-benchmarking-cli", - "frame-benchmarking-pallet-pov 18.0.0", - "frame-election-provider-solution-type 13.0.0", - "frame-election-provider-support 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", + "frame-benchmarking-pallet-pov", + "frame-election-provider-solution-type", + "frame-election-provider-support", + "frame-executive", + "frame-metadata-hash-extension", "frame-remote-externalities", - "frame-support 28.0.0", - "frame-support-procedural 23.0.0", - "frame-support-procedural-tools 10.0.0", - "frame-support-procedural-tools-derive 11.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-support", + "frame-support-procedural", + "frame-support-procedural-tools", + "frame-support-procedural-tools-derive", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "generate-bags", "mmr-gadget", "mmr-rpc", - "pallet-alliance 27.0.0", - "pallet-asset-conversion 10.0.0", - "pallet-asset-conversion-ops 0.1.0", - "pallet-asset-conversion-tx-payment 10.0.0", - "pallet-asset-rate 7.0.0", - "pallet-asset-tx-payment 28.0.0", - "pallet-assets 29.1.0", - 
"pallet-assets-freezer 0.1.0", - "pallet-atomic-swap 28.0.0", - "pallet-aura 27.0.0", - "pallet-authority-discovery 28.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-beefy 28.0.0", - "pallet-beefy-mmr 28.0.0", - "pallet-bounties 27.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-bridge-relayers 0.7.0", - "pallet-broker 0.6.0", - "pallet-child-bounties 27.0.0", - "pallet-collator-selection 9.0.0", - "pallet-collective 28.0.0", - "pallet-collective-content 0.6.0", - "pallet-contracts 27.0.0", - "pallet-contracts-mock-network 3.0.0", - "pallet-contracts-proc-macro 18.0.0", - "pallet-contracts-uapi 5.0.0", - "pallet-conviction-voting 28.0.0", - "pallet-core-fellowship 12.0.0", - "pallet-delegated-staking 1.0.0", - "pallet-democracy 28.0.0", - "pallet-dev-mode 10.0.0", - "pallet-election-provider-multi-phase 27.0.0", - "pallet-election-provider-support-benchmarking 27.0.0", - "pallet-elections-phragmen 29.0.0", - "pallet-fast-unstake 27.0.0", - "pallet-glutton 14.0.0", - "pallet-grandpa 28.0.0", - "pallet-identity 29.0.0", - "pallet-im-online 27.0.0", - "pallet-indices 28.0.0", - "pallet-insecure-randomness-collective-flip 16.0.0", - "pallet-lottery 28.0.0", - "pallet-membership 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-migrations 1.0.0", - "pallet-mixnet 0.4.0", - "pallet-mmr 27.0.0", - "pallet-multisig 28.0.0", - "pallet-nft-fractionalization 10.0.0", - "pallet-nfts 22.0.0", - "pallet-nfts-runtime-api 14.0.0", - "pallet-nis 28.0.0", - "pallet-node-authorization 28.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-nomination-pools-benchmarking 26.0.0", - "pallet-nomination-pools-runtime-api 23.0.0", - "pallet-offences 27.0.0", - "pallet-offences-benchmarking 28.0.0", - "pallet-paged-list 0.6.0", - "pallet-parameters 0.1.0", - "pallet-preimage 28.0.0", - "pallet-proxy 28.0.0", - "pallet-ranked-collective 
28.0.0", - "pallet-recovery 28.0.0", - "pallet-referenda 28.0.0", - "pallet-remark 28.0.0", - "pallet-revive 0.1.0", + "pallet-alliance", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-asset-rate", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-assets-freezer", + "pallet-atomic-swap", + "pallet-aura", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-bounties", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-broker", + "pallet-child-bounties", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-contracts", + "pallet-contracts-mock-network", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-conviction-voting", + "pallet-core-fellowship", + "pallet-delegated-staking", + "pallet-democracy", + "pallet-dev-mode", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-glutton", + "pallet-grandpa", + "pallet-identity", + "pallet-im-online", + "pallet-indices", + "pallet-insecure-randomness-collective-flip", + "pallet-lottery", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mixnet", + "pallet-mmr", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-nis", + "pallet-node-authorization", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-paged-list", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-remark", + "pallet-revive", 
"pallet-revive-eth-rpc", - "pallet-revive-mock-network 0.1.0", - "pallet-revive-proc-macro 0.1.0", - "pallet-revive-uapi 0.1.0", - "pallet-root-offences 25.0.0", - "pallet-root-testing 4.0.0", - "pallet-safe-mode 9.0.0", - "pallet-salary 13.0.0", - "pallet-scheduler 29.0.0", - "pallet-scored-pool 28.0.0", - "pallet-session 28.0.0", - "pallet-session-benchmarking 28.0.0", - "pallet-skip-feeless-payment 3.0.0", - "pallet-society 28.0.0", - "pallet-staking 28.0.0", + "pallet-revive-fixtures", + "pallet-revive-mock-network", + "pallet-revive-proc-macro", + "pallet-revive-uapi", + "pallet-root-offences", + "pallet-root-testing", + "pallet-safe-mode", + "pallet-salary", + "pallet-scheduler", + "pallet-scored-pool", + "pallet-session", + "pallet-session-benchmarking", + "pallet-skip-feeless-payment", + "pallet-society", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-staking-reward-fn 19.0.0", - "pallet-staking-runtime-api 14.0.0", - "pallet-state-trie-migration 29.0.0", - "pallet-statement 10.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-tips 27.0.0", - "pallet-transaction-payment 28.0.0", + "pallet-staking-reward-fn", + "pallet-staking-runtime-api", + "pallet-state-trie-migration", + "pallet-statement", + "pallet-sudo", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", "pallet-transaction-payment-rpc", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-transaction-storage 27.0.0", - "pallet-treasury 27.0.0", - "pallet-tx-pause 9.0.0", - "pallet-uniques 28.0.0", - "pallet-utility 28.0.0", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", + "pallet-treasury", + "pallet-tx-pause", + "pallet-uniques", + "pallet-utility", "pallet-verify-signature", - "pallet-vesting 28.0.0", - "pallet-whitelist 27.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", - "pallet-xcm-bridge-hub-router 0.5.0", - "parachains-common 7.0.0", - 
"parachains-runtimes-test-utils 7.0.0", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", @@ -18955,13 +15918,13 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-omni-node-lib", "polkadot-overseer", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-rpc", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-metrics 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "polkadot-sdk-frame 0.1.0", + "polkadot-runtime-common", + "polkadot-runtime-metrics", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", @@ -19007,7 +15970,6 @@ dependencies = [ "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", - "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", @@ -19020,38 +15982,38 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", - "slot-range-helper 7.0.0", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-ethereum 0.3.0", - "snowbridge-outbound-queue-merkle-tree 0.3.0", - "snowbridge-outbound-queue-runtime-api 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-ethereum-client-fixtures 0.9.0", - "snowbridge-pallet-inbound-queue 0.2.0", - "snowbridge-pallet-inbound-queue-fixtures 0.10.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", - 
"snowbridge-router-primitives 0.9.0", - "snowbridge-runtime-common 0.2.0", - "snowbridge-runtime-test-common 0.2.0", - "snowbridge-system-runtime-api 0.2.0", + "slot-range-helper", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "snowbridge-outbound-queue-merkle-tree", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-ethereum-client-fixtures", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-runtime-test-common", + "snowbridge-system-runtime-api", "sp-api 26.0.0", "sp-api-proc-macro 15.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", - "sp-block-builder 26.0.0", + "sp-authority-discovery", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-aura 0.32.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", - "sp-consensus-grandpa 13.0.0", - "sp-consensus-pow 0.32.0", - "sp-consensus-slots 0.32.0", - "sp-core 28.0.0", - "sp-core-hashing 15.0.0", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-consensus-pow", + "sp-consensus-slots", + "sp-core 28.0.0", + "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-crypto-ec-utils 0.10.0", "sp-crypto-hashing 0.1.0", @@ -19059,32 +16021,32 @@ dependencies = [ "sp-database", "sp-debug-derive 14.0.0", "sp-externalities 0.25.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-maybe-compressed-blob 11.0.0", "sp-metadata-ir 0.6.0", - "sp-mixnet 0.4.0", - "sp-mmr-primitives 26.0.0", - "sp-npos-elections 26.0.0", - "sp-offchain 26.0.0", + "sp-mixnet", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", 
"sp-panic-handler 13.0.0", "sp-rpc", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-runtime-interface-proc-macro 17.0.0", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-state-machine 0.35.0", - "sp-statement-store 10.0.0", + "sp-statement-store", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", - "sp-transaction-storage-proof 26.0.0", + "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie 29.0.0", "sp-version 29.0.0", "sp-version-proc-macro 13.0.0", @@ -19092,11 +16054,11 @@ dependencies = [ "sp-weights 27.0.0", "staging-chain-spec-builder", "staging-node-inspect", - "staging-parachain-info 0.7.0", + "staging-parachain-info", "staging-tracking-allocator", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "subkey", "substrate-bip39 0.4.7", "substrate-build-script-utils", @@ -19105,246 +16067,14 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", + "substrate-wasm-builder", + "testnet-parachains-constants", "tracing-gum", "tracing-gum-proc-macro", "xcm-emulator", - "xcm-procedural 7.0.0", - "xcm-runtime-apis 0.1.0", - "xcm-simulator 7.0.0", -] - -[[package]] -name = "polkadot-sdk" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb819108697967452fa6d8d96ab4c0d48cbaa423b3156499dcb24f1cf95d6775" -dependencies = [ - "asset-test-utils 18.0.0", - "assets-common 0.18.0", - "binary-merkle-tree 15.0.1", - "bp-header-chain 0.18.1", - "bp-messages 0.18.0", - "bp-parachains 0.18.0", - "bp-polkadot 0.16.0", - "bp-polkadot-core 0.18.0", - "bp-relayers 0.18.0", - "bp-runtime 0.18.0", - "bp-test-utils 0.18.0", - "bp-xcm-bridge-hub 0.4.0", - 
"bp-xcm-bridge-hub-router 0.14.1", - "bridge-hub-common 0.10.0", - "bridge-hub-test-utils 0.18.0", - "bridge-runtime-common 0.18.0", - "cumulus-pallet-aura-ext 0.17.0", - "cumulus-pallet-dmp-queue 0.17.0", - "cumulus-pallet-parachain-system 0.17.1", - "cumulus-pallet-parachain-system-proc-macro 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cumulus-pallet-session-benchmarking 19.0.0", - "cumulus-pallet-solo-to-para 0.17.0", - "cumulus-pallet-xcm 0.17.0", - "cumulus-pallet-xcmp-queue 0.17.0", - "cumulus-ping 0.17.0", - "cumulus-primitives-aura 0.15.0", - "cumulus-primitives-core 0.16.0", - "cumulus-primitives-parachain-inherent 0.16.0", - "cumulus-primitives-proof-size-hostfunction 0.10.0", - "cumulus-primitives-storage-weight-reclaim 8.0.0", - "cumulus-primitives-timestamp 0.16.0", - "cumulus-primitives-utility 0.17.0", - "cumulus-test-relay-sproof-builder 0.16.0", - "frame-benchmarking 38.0.0", - "frame-benchmarking-pallet-pov 28.0.0", - "frame-election-provider-support 38.0.0", - "frame-executive 38.0.0", - "frame-metadata-hash-extension 0.6.0", - "frame-support 38.0.0", - "frame-support-procedural 30.0.4", - "frame-system 38.0.0", - "frame-system-benchmarking 38.0.0", - "frame-system-rpc-runtime-api 34.0.0", - "frame-try-runtime 0.44.0", - "pallet-alliance 37.0.0", - "pallet-asset-conversion 20.0.0", - "pallet-asset-conversion-ops 0.6.0", - "pallet-asset-conversion-tx-payment 20.0.0", - "pallet-asset-rate 17.0.0", - "pallet-asset-tx-payment 38.0.0", - "pallet-assets 40.0.0", - "pallet-assets-freezer 0.5.0", - "pallet-atomic-swap 38.0.0", - "pallet-aura 37.0.0", - "pallet-authority-discovery 38.0.0", - "pallet-authorship 38.0.0", - "pallet-babe 38.0.0", - "pallet-bags-list 37.0.0", - "pallet-balances 39.0.0", - "pallet-beefy 39.0.0", - "pallet-beefy-mmr 39.0.0", - "pallet-bounties 37.0.0", - "pallet-bridge-grandpa 0.18.0", - "pallet-bridge-messages 0.18.0", - "pallet-bridge-parachains 0.18.0", - "pallet-bridge-relayers 0.18.0", - 
"pallet-broker 0.17.0", - "pallet-child-bounties 37.0.0", - "pallet-collator-selection 19.0.0", - "pallet-collective 38.0.0", - "pallet-collective-content 0.16.0", - "pallet-contracts 38.0.0", - "pallet-contracts-mock-network 14.0.0", - "pallet-conviction-voting 38.0.0", - "pallet-core-fellowship 22.0.0", - "pallet-delegated-staking 5.0.0", - "pallet-democracy 38.0.0", - "pallet-dev-mode 20.0.0", - "pallet-election-provider-multi-phase 37.0.0", - "pallet-election-provider-support-benchmarking 37.0.0", - "pallet-elections-phragmen 39.0.0", - "pallet-fast-unstake 37.0.0", - "pallet-glutton 24.0.0", - "pallet-grandpa 38.0.0", - "pallet-identity 38.0.0", - "pallet-im-online 37.0.0", - "pallet-indices 38.0.0", - "pallet-insecure-randomness-collective-flip 26.0.0", - "pallet-lottery 38.0.0", - "pallet-membership 38.0.0", - "pallet-message-queue 41.0.1", - "pallet-migrations 8.0.0", - "pallet-mixnet 0.14.0", - "pallet-mmr 38.0.0", - "pallet-multisig 38.0.0", - "pallet-nft-fractionalization 21.0.0", - "pallet-nfts 32.0.0", - "pallet-nfts-runtime-api 24.0.0", - "pallet-nis 38.0.0", - "pallet-node-authorization 38.0.0", - "pallet-nomination-pools 35.0.0", - "pallet-nomination-pools-benchmarking 36.0.0", - "pallet-nomination-pools-runtime-api 33.0.0", - "pallet-offences 37.0.0", - "pallet-offences-benchmarking 38.0.0", - "pallet-paged-list 0.16.0", - "pallet-parameters 0.9.0", - "pallet-preimage 38.0.0", - "pallet-proxy 38.0.0", - "pallet-ranked-collective 38.0.0", - "pallet-recovery 38.0.0", - "pallet-referenda 38.0.0", - "pallet-remark 38.0.0", - "pallet-revive 0.2.0", - "pallet-revive-fixtures 0.2.0", - "pallet-revive-mock-network 0.2.0", - "pallet-root-offences 35.0.0", - "pallet-root-testing 14.0.0", - "pallet-safe-mode 19.0.0", - "pallet-salary 23.0.0", - "pallet-scheduler 39.0.0", - "pallet-scored-pool 38.0.0", - "pallet-session 38.0.0", - "pallet-session-benchmarking 38.0.0", - "pallet-skip-feeless-payment 13.0.0", - "pallet-society 38.0.0", - "pallet-staking 38.0.0", 
- "pallet-staking-reward-fn 22.0.0", - "pallet-staking-runtime-api 24.0.0", - "pallet-state-trie-migration 40.0.0", - "pallet-statement 20.0.0", - "pallet-sudo 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-tips 37.0.0", - "pallet-transaction-payment 38.0.0", - "pallet-transaction-payment-rpc-runtime-api 38.0.0", - "pallet-transaction-storage 37.0.0", - "pallet-treasury 37.0.0", - "pallet-tx-pause 19.0.0", - "pallet-uniques 38.0.0", - "pallet-utility 38.0.0", - "pallet-vesting 38.0.0", - "pallet-whitelist 37.0.0", - "pallet-xcm 17.0.0", - "pallet-xcm-benchmarks 17.0.0", - "pallet-xcm-bridge-hub 0.13.0", - "pallet-xcm-bridge-hub-router 0.15.1", - "parachains-common 18.0.0", - "parachains-runtimes-test-utils 17.0.0", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-common 17.0.0", - "polkadot-runtime-metrics 17.0.0", - "polkadot-runtime-parachains 17.0.1", - "polkadot-sdk-frame 0.7.0", - "sc-executor 0.40.1", - "slot-range-helper 15.0.0", - "snowbridge-beacon-primitives 0.10.0", - "snowbridge-core 0.10.0", - "snowbridge-ethereum 0.9.0", - "snowbridge-outbound-queue-merkle-tree 0.9.1", - "snowbridge-outbound-queue-runtime-api 0.10.0", - "snowbridge-pallet-ethereum-client 0.10.0", - "snowbridge-pallet-ethereum-client-fixtures 0.18.0", - "snowbridge-pallet-inbound-queue 0.10.0", - "snowbridge-pallet-inbound-queue-fixtures 0.18.0", - "snowbridge-pallet-outbound-queue 0.10.0", - "snowbridge-pallet-system 0.10.0", - "snowbridge-router-primitives 0.16.0", - "snowbridge-runtime-common 0.10.0", - "snowbridge-runtime-test-common 0.10.0", - "snowbridge-system-runtime-api 0.10.0", - "sp-api 34.0.0", - "sp-api-proc-macro 20.0.0", - "sp-application-crypto 38.0.0", - "sp-arithmetic 26.0.0", - "sp-authority-discovery 34.0.0", - "sp-block-builder 34.0.0", - "sp-consensus-aura 0.40.0", - "sp-consensus-babe 0.40.0", - "sp-consensus-beefy 22.1.0", - "sp-consensus-grandpa 21.0.0", - "sp-consensus-pow 
0.40.0", - "sp-consensus-slots 0.40.1", - "sp-core 34.0.0", - "sp-core-hashing 16.0.0", - "sp-crypto-ec-utils 0.14.0", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.29.0", - "sp-genesis-builder 0.15.1", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-keyring 39.0.0", - "sp-keystore 0.40.0", - "sp-metadata-ir 0.7.0", - "sp-mixnet 0.12.0", - "sp-mmr-primitives 34.1.0", - "sp-npos-elections 34.0.0", - "sp-offchain 34.0.0", - "sp-runtime 39.0.2", - "sp-runtime-interface 28.0.0", - "sp-session 36.0.0", - "sp-staking 36.0.0", - "sp-state-machine 0.43.0", - "sp-statement-store 18.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 21.0.0", - "sp-timestamp 34.0.0", - "sp-tracing 17.0.1", - "sp-transaction-pool 34.0.0", - "sp-transaction-storage-proof 34.0.0", - "sp-trie 37.0.0", - "sp-version 37.0.0", - "sp-wasm-interface 21.0.1", - "sp-weights 31.0.0", - "staging-parachain-info 0.17.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", - "substrate-bip39 0.6.0", - "testnet-parachains-constants 10.0.0", - "xcm-runtime-apis 0.4.0", + "xcm-procedural", + "xcm-runtime-apis", + "xcm-simulator", ] [[package]] @@ -19353,57 +16083,56 @@ version = "0.0.1" dependencies = [ "assert_cmd", "chain-spec-guide-runtime", - "cmd_lib", "cumulus-client-service", - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-primitives-proof-size-hostfunction 0.2.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", "docify", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + 
"frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", "kitchensink-runtime", "log", "minimal-template-runtime", - "pallet-asset-conversion-tx-payment 10.0.0", - "pallet-asset-tx-payment 28.0.0", - "pallet-assets 29.1.0", - "pallet-aura 27.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-broker 0.6.0", - "pallet-collective 28.0.0", - "pallet-contracts 27.0.0", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-aura", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-broker", + "pallet-collective", + "pallet-contracts", "pallet-default-config-example", - "pallet-democracy 28.0.0", + "pallet-democracy", "pallet-example-authorization-tx-extension", "pallet-example-offchain-worker", "pallet-example-single-block-migrations", "pallet-examples", - "pallet-grandpa 28.0.0", - "pallet-multisig 28.0.0", - "pallet-nfts 22.0.0", - "pallet-preimage 28.0.0", - "pallet-proxy 28.0.0", - "pallet-referenda 28.0.0", - "pallet-scheduler 29.0.0", - "pallet-skip-feeless-payment 3.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-uniques 28.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", + "pallet-grandpa", + "pallet-multisig", + "pallet-nfts", + "pallet-preimage", + "pallet-proxy", + "pallet-referenda", + "pallet-scheduler", + "pallet-skip-feeless-payment", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-uniques", + "pallet-utility", + "pallet-xcm", "parachain-template-runtime", "parity-scale-codec", "polkadot-omni-node-lib", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "polkadot-sdk-docs-first-pallet", "polkadot-sdk-docs-first-runtime", - "polkadot-sdk-frame 0.1.0", + "polkadot-sdk-frame", "rand", "sc-chain-spec", "sc-cli", @@ -19426,10 +16155,10 @@ dependencies = [ "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", + 
"sp-genesis-builder", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-std 14.0.0", @@ -19438,14 +16167,14 @@ dependencies = [ "sp-weights 27.0.0", "staging-chain-spec-builder", "staging-node-cli", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "subkey", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", "xcm-docs", - "xcm-simulator 7.0.0", + "xcm-simulator", ] [[package]] @@ -19454,7 +16183,7 @@ version = "0.0.0" dependencies = [ "docify", "parity-scale-codec", - "polkadot-sdk-frame 0.1.0", + "polkadot-sdk-frame", "scale-info", ] @@ -19463,18 +16192,18 @@ name = "polkadot-sdk-docs-first-runtime" version = "0.0.0" dependencies = [ "docify", - "pallet-balances 28.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-balances", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "polkadot-sdk-docs-first-pallet", - "polkadot-sdk-frame 0.1.0", + "polkadot-sdk-frame", "scale-info", "serde_json", - "sp-keyring 31.0.0", - "substrate-wasm-builder 17.0.0", + "sp-keyring", + "substrate-wasm-builder", ] [[package]] @@ -19482,87 +16211,54 @@ name = "polkadot-sdk-frame" version = "0.1.0" dependencies = [ "docify", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "log", 
"pallet-examples", "parity-scale-codec", "scale-info", "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", - "sp-consensus-grandpa 13.0.0", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", ] -[[package]] -name = "polkadot-sdk-frame" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdeb15ce08142082461afe1a62c15f7ce10a731d91b203ad6a8dc8d2e4a6a54" -dependencies = [ - "docify", - "frame-benchmarking 38.0.0", - "frame-executive 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "frame-system-benchmarking 38.0.0", - "frame-system-rpc-runtime-api 34.0.0", - "frame-try-runtime 0.44.0", - "log", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-arithmetic 26.0.0", - "sp-block-builder 34.0.0", - "sp-consensus-aura 0.40.0", - "sp-consensus-grandpa 21.0.0", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-io 38.0.0", - "sp-offchain 34.0.0", - "sp-runtime 39.0.2", - "sp-session 36.0.0", - "sp-storage 21.0.0", - "sp-transaction-pool 34.0.0", - "sp-version 37.0.0", -] - [[package]] name = "polkadot-service" version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "frame-benchmarking 28.0.0", + "frame-benchmarking", "frame-benchmarking-cli", - "frame-metadata-hash-extension 0.1.0", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", + "frame-metadata-hash-extension", + "frame-system", + "frame-system-rpc-runtime-api", "futures", "is_executable", "kvdb", "kvdb-rocksdb", "log", "mmr-gadget", - "pallet-transaction-payment 28.0.0", - 
"pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", "parking_lot 0.12.3", @@ -19571,7 +16267,7 @@ dependencies = [ "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-collator-protocol", - "polkadot-core-primitives 7.0.0", + "polkadot-core-primitives", "polkadot-dispute-distribution", "polkadot-gossip-support", "polkadot-network-bridge", @@ -19598,14 +16294,14 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-rpc", - "polkadot-runtime-parachains 7.0.0", + "polkadot-runtime-parachains", "polkadot-statement-distribution", "polkadot-test-client", "rococo-runtime", - "rococo-runtime-constants 7.0.0", + "rococo-runtime-constants", "sc-authority-discovery", "sc-basic-authorship", "sc-chain-spec", @@ -19629,35 +16325,35 @@ dependencies = [ "serde", "serde_json", "sp-api 26.0.0", - "sp-authority-discovery 26.0.0", - "sp-block-builder 26.0.0", + "sp-authority-discovery", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-mmr-primitives 26.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-mmr-primitives", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-timestamp 26.0.0", + "sp-session", + "sp-timestamp", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", "sp-weights 27.0.0", - "staging-xcm 7.0.0", + "staging-xcm", "substrate-prometheus-endpoint", "tempfile", "thiserror", 
"tracing-gum", "westend-runtime", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] @@ -19671,14 +16367,14 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap 2.7.0", + "indexmap 2.2.3", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand_chacha", @@ -19686,11 +16382,11 @@ dependencies = [ "sc-keystore", "sc-network", "sp-application-crypto 30.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", - "sp-staking 26.0.0", + "sp-staking", "sp-tracing 16.0.0", "thiserror", "tracing-gum", @@ -19701,7 +16397,7 @@ name = "polkadot-statement-table" version = "7.0.0" dependencies = [ "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "sp-core 28.0.0", "tracing-gum", ] @@ -19745,7 +16441,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-service", "polkadot-statement-distribution", @@ -19767,12 +16463,12 @@ dependencies = [ "sha1", "sp-application-crypto 30.0.0", "sp-consensus", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "strum 0.26.3", "substrate-prometheus-endpoint", @@ -19785,11 +16481,11 @@ dependencies = [ name = "polkadot-test-client" version = "1.0.0" dependencies = [ - "frame-benchmarking 28.0.0", + "frame-benchmarking", "futures", "parity-scale-codec", 
"polkadot-node-subsystem", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "polkadot-test-runtime", "polkadot-test-service", "sc-block-builder", @@ -19799,14 +16495,14 @@ dependencies = [ "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp 26.0.0", + "sp-timestamp", "substrate-test-client", ] @@ -19834,7 +16530,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "rand", "sp-core 28.0.0", "sp-keystore 0.34.0", @@ -19846,58 +16542,58 @@ dependencies = [ name = "polkadot-test-runtime" version = "1.0.0" dependencies = [ - "frame-election-provider-support 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", + "frame-election-provider-support", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", "hex-literal", "log", - "pallet-authority-discovery 28.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-grandpa 28.0.0", - "pallet-indices 28.0.0", - "pallet-offences 27.0.0", - "pallet-session 28.0.0", - "pallet-staking 28.0.0", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-grandpa", + "pallet-indices", + "pallet-offences", + "pallet-session", + "pallet-staking", "pallet-staking-reward-curve", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-vesting 28.0.0", - "pallet-xcm 7.0.0", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + 
"pallet-transaction-payment-rpc-runtime-api", + "pallet-vesting", + "pallet-xcm", "parity-scale-codec", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", "scale-info", "serde", "serde_json", "sp-api 26.0.0", - "sp-authority-discovery 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-mmr-primitives 26.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-mmr-primitives", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", - "sp-transaction-pool 26.0.0", + "sp-session", + "sp-staking", + "sp-transaction-pool", "sp-trie 29.0.0", "sp-version 29.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", "test-runtime-constants", "tiny-keccak", ] @@ -19906,20 +16602,20 @@ dependencies = [ name = "polkadot-test-service" version = "1.0.0" dependencies = [ - "frame-system 28.0.0", + "frame-system", "futures", "hex", - "pallet-balances 28.0.0", - "pallet-staking 28.0.0", - "pallet-transaction-payment 28.0.0", + "pallet-balances", + "pallet-staking", + "pallet-transaction-payment", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-rpc", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-runtime-common", + "polkadot-runtime-parachains", 
"polkadot-service", "polkadot-test-runtime", "rand", @@ -19936,14 +16632,14 @@ dependencies = [ "sc-transaction-pool", "serde_json", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-babe", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", + "sp-inherents", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "substrate-test-client", @@ -19997,28 +16693,15 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec0c5935f2eff23cfc4653002f4f8d12b37f87a720e0631282d188c32089d6" -dependencies = [ - "libc", - "log", - "polkavm-assembler 0.10.0", - "polkavm-common 0.10.0", - "polkavm-linux-raw 0.10.0", -] - -[[package]] -name = "polkavm" -version = "0.18.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd044ab1d3b11567ab6b98ca71259a992b4034220d5972988a0e96518e5d343d" +checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" dependencies = [ "libc", "log", - "polkavm-assembler 0.18.0", - "polkavm-common 0.18.0", - "polkavm-linux-raw 0.18.0", + "polkavm-assembler 0.13.0", + "polkavm-common 0.13.0", + "polkavm-linux-raw 0.13.0", ] [[package]] @@ -20032,18 +16715,9 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e4fd5a43100bf1afe9727b8130d01f966f5cfc9144d5604b21e795c2bcd80e" -dependencies = [ - "log", -] - -[[package]] -name = "polkavm-assembler" -version = "0.18.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaad38dc420bfed79e6f731471c973ce5ff5e47ab403e63cf40358fef8a6368f" +checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" 
dependencies = [ "log", ] @@ -20065,23 +16739,19 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.10.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0097b48bc0bedf9f3f537ce8f37e8f1202d8d83f9b621bdb21ff2c59b9097c50" +checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" dependencies = [ "log", - "polkavm-assembler 0.10.0", + "polkavm-assembler 0.13.0", ] [[package]] name = "polkavm-common" -version = "0.18.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ff33982a807d8567645d4784b9b5d7ab87bcb494f534a57cadd9012688e102" -dependencies = [ - "log", - "polkavm-assembler 0.18.0", -] +checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b" [[package]] name = "polkavm-derive" @@ -20103,20 +16773,11 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dcc701385c08c31bdb0569f0c51a290c580d892fa77f1dd88a7352a62679ecf" -dependencies = [ - "polkavm-derive-impl-macro 0.10.0", -] - -[[package]] -name = "polkavm-derive" -version = "0.18.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2eb703f3b6404c13228402e98a5eae063fd16b8f58afe334073ec105ee4117e" +checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1" dependencies = [ - "polkavm-derive-impl-macro 0.18.0", + "polkavm-derive-impl-macro 0.14.0", ] [[package]] @@ -20145,23 +16806,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7855353a5a783dd5d09e3b915474bddf66575f5a3cf45dec8d1c5e051ba320dc" -dependencies = [ - "polkavm-common 0.10.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - -[[package]] -name = "polkavm-derive-impl" -version = "0.18.0" +version = 
"0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d2840cc62a0550156b1676fed8392271ddf2fab4a00661db56231424674624" +checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2" dependencies = [ - "polkavm-common 0.18.0", + "polkavm-common 0.14.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -20189,21 +16838,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9324fe036de37c17829af233b46ef6b5562d4a0c09bb7fdb9f8378856dee30cf" -dependencies = [ - "polkavm-derive-impl 0.10.0", - "syn 2.0.87", -] - -[[package]] -name = "polkavm-derive-impl-macro" -version = "0.18.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" +checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702" dependencies = [ - "polkavm-derive-impl 0.18.0", + "polkavm-derive-impl 0.14.0", "syn 2.0.87", ] @@ -20224,31 +16863,15 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d704edfe7bdcc876784f19436d53d515b65eb07bc9a0fae77085d552c2dbbb5" -dependencies = [ - "gimli 0.28.0", - "hashbrown 0.14.5", - "log", - "object 0.36.1", - "polkavm-common 0.10.0", - "regalloc2 0.9.3", - "rustc-demangle", -] - -[[package]] -name = "polkavm-linker" -version = "0.18.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9bfe793b094d9ea5c99b7c43ba46e277b0f8f48f4bbfdbabf8d3ebf701a4bd3" +checksum = "0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a" dependencies = [ - "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.18.0", + "polkavm-common 0.14.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -20261,15 +16884,9 @@ 
checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" [[package]] name = "polkavm-linux-raw" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" - -[[package]] -name = "polkavm-linux-raw" -version = "0.18.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde" +checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" [[package]] name = "polling" @@ -20296,7 +16913,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.21", + "rustix 0.38.25", "tracing", "windows-sys 0.52.0", ] @@ -20356,7 +16973,7 @@ dependencies = [ "findshlibs", "libc", "log", - "nix 0.26.4", + "nix 0.26.2", "once_cell", "parking_lot 0.12.3", "smallvec", @@ -20455,7 +17072,6 @@ dependencies = [ "fixed-hash", "impl-codec 0.6.0", "impl-num-traits 0.1.2", - "impl-rlp 0.3.0", "impl-serde 0.4.0", "scale-info", "uint 0.9.5", @@ -20470,7 +17086,7 @@ dependencies = [ "fixed-hash", "impl-codec 0.7.0", "impl-num-traits 0.2.0", - "impl-rlp 0.4.0", + "impl-rlp", "impl-serde 0.5.0", "scale-info", "uint 0.10.0", @@ -20484,7 +17100,7 @@ checksum = "a172e6cc603231f2cf004232eabcecccc0da53ba576ab286ef7baa0cfc7927ad" dependencies = [ "coarsetime", "crossbeam-queue", - "derive_more 0.99.17", + "derive_more", "futures", "futures-timer", "nanorand", @@ -20563,6 +17179,17 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +[[package]] +name = "proc-macro-warning" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] 
+ [[package]] name = "proc-macro-warning" version = "1.0.0" @@ -20604,7 +17231,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.25", ] [[package]] @@ -20634,9 +17261,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.3" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", @@ -20681,7 +17308,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.5", + "regex-syntax 0.8.2", "rusty-fork", "tempfile", "unarray", @@ -20725,7 +17352,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.13.0", + "itertools 0.12.1", "log", "multimap", "once_cell", @@ -20771,7 +17398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.12.1", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -20807,7 +17434,7 @@ dependencies = [ "log", "names", "prost 0.11.9", - "reqwest 0.11.27", + "reqwest 0.11.20", "thiserror", "url", "winapi", @@ -20867,15 +17494,15 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.3.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "quick-protobuf 0.8.1", "thiserror", - "unsigned-varint 0.8.0", + "unsigned-varint 0.7.2", ] [[package]] @@ -20900,6 +17527,24 @@ dependencies = [ "rand", ] +[[package]] +name = "quinn" 
+version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", + "rustc-hash 1.1.0", + "rustls 0.21.7", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "quinn" version = "0.11.5" @@ -20907,18 +17552,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", - "futures-io", "pin-project-lite", - "quinn-proto", - "quinn-udp", + "quinn-proto 0.11.8", + "quinn-udp 0.5.4", "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustls 0.23.14", "socket2 0.5.7", "thiserror", "tokio", "tracing", ] +[[package]] +name = "quinn-proto" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +dependencies = [ + "bytes", + "rand", + "ring 0.16.20", + "rustc-hash 1.1.0", + "rustls 0.21.7", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + [[package]] name = "quinn-proto" version = "0.11.8" @@ -20927,15 +17588,28 @@ checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", - "ring 0.17.8", + "ring 0.17.7", "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", "tracing", ] +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.7", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quinn-udp" version = "0.5.4" @@ -20982,7 +17656,6 @@ dependencies = [ "libc", "rand_chacha", "rand_core 0.6.4", - "serde", ] [[package]] @@ -21106,15 +17779,19 @@ 
dependencies = [ ] [[package]] -name = "rcgen" -version = "0.11.3" +name = "reconnecting-jsonrpsee-ws-client" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +checksum = "06fa4f17e09edfc3131636082faaec633c7baa269396b4004040bc6c52f49f65" dependencies = [ - "pem 3.0.4", - "ring 0.16.20", - "time", - "yasna", + "cfg_aliases 0.2.1", + "finito", + "futures", + "jsonrpsee 0.23.2", + "serde_json", + "thiserror", + "tokio", + "tracing", ] [[package]] @@ -21161,7 +17838,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87413ebb313323d431e85d0afc5a68222aaed972843537cbfe5f061cf1b4bcab" dependencies = [ - "derive_more 0.99.17", + "derive_more", "fs-err", "static_init", "thiserror", @@ -21214,14 +17891,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", + "regex-automata 0.4.7", + "regex-syntax 0.8.2", ] [[package]] @@ -21241,13 +17918,13 @@ checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax 0.8.2", ] [[package]] @@ -21258,9 +17935,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.5" 
+version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relative-path" @@ -21274,19 +17951,19 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-runtime 0.7.0", + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", "finality-relay", - "frame-support 28.0.0", + "frame-support", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "num-traits", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-utility 28.0.0", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", "parity-scale-codec", "quick_cache", "rand", @@ -21296,14 +17973,14 @@ dependencies = [ "sc-transaction-pool-api", "scale-info", "serde_json", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-rpc", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", "sp-version 29.0.0", - "staging-xcm 7.0.0", + "staging-xcm", "thiserror", "tokio", ] @@ -21316,7 +17993,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "bp-runtime 0.7.0", + "bp-runtime", "console", "futures", "isahc", @@ -21339,21 +18016,21 @@ name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ "clap 4.5.13", - "frame-system 28.0.0", + "frame-system", "log", "pallet-bags-list-remote-tests", "sp-core 28.0.0", "sp-tracing 16.0.0", "tokio", "westend-runtime", - "westend-runtime-constants 7.0.0", + "westend-runtime-constants", ] [[package]] name = "reqwest" -version = "0.11.27" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = 
"3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ "base64 0.21.7", "bytes", @@ -21379,8 +18056,6 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -21390,14 +18065,14 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots 0.25.2", - "winreg", + "winreg 0.50.0", ] [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" dependencies = [ "base64 0.22.1", "bytes", @@ -21417,14 +18092,14 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "quinn", - "rustls 0.23.18", + "quinn 0.11.5", + "rustls 0.23.14", "rustls-pemfile 2.0.0", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper", "tokio", "tokio-rustls 0.26.0", "tower-service", @@ -21433,7 +18108,7 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots 0.26.3", - "windows-registry", + "winreg 0.52.0", ] [[package]] @@ -21490,17 +18165,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -21553,138 +18227,138 @@ name = "rococo-emulated-chain" version = "0.0.0" dependencies = [ "emulated-integration-tests-common", - "parachains-common 7.0.0", - "polkadot-primitives 7.0.0", + "parachains-common", + "polkadot-primitives", 
"rococo-runtime", - "rococo-runtime-constants 7.0.0", + "rococo-runtime-constants", "sc-consensus-grandpa", - "sp-authority-discovery 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", ] [[package]] name = "rococo-parachain-runtime" version = "0.6.0" dependencies = [ - "cumulus-pallet-aura-ext 0.7.0", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcm 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-ping 0.7.0", - "cumulus-primitives-aura 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-storage-weight-reclaim 1.0.0", - "cumulus-primitives-utility 0.7.0", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "pallet-assets 29.1.0", - "pallet-aura 27.0.0", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-xcm 7.0.0", - "parachains-common 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-common 7.0.0", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "pallet-assets", + "pallet-aura", + "pallet-balances", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + 
"polkadot-parachain-primitives", + "polkadot-runtime-common", "scale-info", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", + "sp-block-builder", + "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-transaction-pool 26.0.0", + "sp-session", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", - "testnet-parachains-constants 1.0.0", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", ] [[package]] name = "rococo-runtime" version = "7.0.0" dependencies = [ - "binary-merkle-tree 13.0.0", + "binary-merkle-tree", "bitvec", - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", "frame-remote-externalities", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-asset-rate 7.0.0", - "pallet-authority-discovery 28.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-beefy 28.0.0", - "pallet-beefy-mmr 28.0.0", - "pallet-bounties 27.0.0", - "pallet-child-bounties 27.0.0", - "pallet-collective 28.0.0", - "pallet-conviction-voting 28.0.0", - "pallet-democracy 28.0.0", - "pallet-elections-phragmen 29.0.0", - "pallet-grandpa 28.0.0", - "pallet-identity 29.0.0", - "pallet-indices 28.0.0", - 
"pallet-membership 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-migrations 1.0.0", - "pallet-mmr 27.0.0", - "pallet-multisig 28.0.0", - "pallet-nis 28.0.0", - "pallet-offences 27.0.0", - "pallet-parameters 0.1.0", - "pallet-preimage 28.0.0", - "pallet-proxy 28.0.0", - "pallet-ranked-collective 28.0.0", - "pallet-recovery 28.0.0", - "pallet-referenda 28.0.0", - "pallet-root-testing 4.0.0", - "pallet-scheduler 29.0.0", - "pallet-session 28.0.0", - "pallet-society 28.0.0", - "pallet-staking 28.0.0", - "pallet-state-trie-migration 29.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-tips 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-treasury 27.0.0", - "pallet-utility 28.0.0", - "pallet-vesting 28.0.0", - "pallet-whitelist 27.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "rococo-runtime-constants 7.0.0", + "pallet-asset-rate", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-bounties", + "pallet-child-bounties", + "pallet-collective", + "pallet-conviction-voting", + "pallet-democracy", + "pallet-elections-phragmen", + "pallet-grandpa", + "pallet-identity", + "pallet-indices", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mmr", + "pallet-multisig", + "pallet-nis", + "pallet-offences", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-root-testing", + "pallet-scheduler", + "pallet-session", + "pallet-society", + "pallet-staking", + "pallet-state-trie-migration", + "pallet-sudo", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", + 
"pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "rococo-runtime-constants", "scale-info", "separator", "serde", @@ -21693,66 +18367,49 @@ dependencies = [ "smallvec", "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", - "sp-consensus-grandpa 13.0.0", - "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-mmr-primitives 26.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-mmr-primitives", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-storage 19.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-trie 29.0.0", "sp-version 29.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "static_assertions", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", "tiny-keccak", "tokio", - "xcm-runtime-apis 0.1.0", + "xcm-runtime-apis", ] [[package]] name = "rococo-runtime-constants" version = "7.0.0" dependencies = [ - "frame-support 28.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", + "frame-support", + "polkadot-primitives", + "polkadot-runtime-common", "smallvec", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", -] - -[[package]] -name = 
"rococo-runtime-constants" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1ec6683a2e52fe3be2eaf942a80619abd99eb36e973c5ab4489a2f3b100db5c" -dependencies = [ - "frame-support 38.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-common 17.0.0", - "smallvec", - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", + "staging-xcm", + "staging-xcm-builder", ] [[package]] @@ -21973,14 +18630,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -22014,7 +18671,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.8", + "ring 0.17.7", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle 2.5.0", @@ -22023,13 +18680,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.18" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", "once_cell", - "ring 0.17.8", + "ring 0.17.7", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle 2.5.0", @@ -22095,9 +18752,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = 
"0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-platform-verifier" @@ -22110,7 +18767,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.18", + "rustls 0.23.14", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -22142,7 +18799,7 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring 0.17.8", + "ring 0.17.7", "rustls-pki-types", "untrusted 0.9.0", ] @@ -22178,12 +18835,13 @@ dependencies = [ [[package]] name = "ruzstd" -version = "0.6.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5174a470eeb535a721ae9fdd6e291c2411a906b96592182d05217591d5c5cf7b" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" dependencies = [ "byteorder", - "derive_more 0.99.17", + "derive_more", + "twox-hash", ] [[package]] @@ -22221,15 +18879,6 @@ dependencies = [ "bytemuck", ] -[[package]] -name = "salsa20" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" -dependencies = [ - "cipher 0.4.4", -] - [[package]] name = "same-file" version = "1.0.6" @@ -22257,19 +18906,7 @@ checksum = "a3f01218e73ea57916be5f08987995ac802d6f4ede4ea5ce0242e468c590e4e2" dependencies = [ "log", "sp-core 33.0.1", - "sp-wasm-interface 21.0.1", - "thiserror", -] - -[[package]] -name = "sc-allocator" -version = "29.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b975ee3a95eaacb611e7b415737a7fa2db4d8ad7b880cc1b97371b04e95c7903" -dependencies = [ - "log", - "sp-core 34.0.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "thiserror", ] @@ -22281,6 +18918,7 @@ dependencies = [ "futures", "futures-timer", "ip_network", + "libp2p", "linked_hash_set", 
"log", "multihash 0.19.1", @@ -22293,7 +18931,7 @@ dependencies = [ "sc-network", "sc-network-types", "sp-api 26.0.0", - "sp-authority-discovery 26.0.0", + "sp-authority-discovery", "sp-blockchain", "sp-core 28.0.0", "sp-keystore 0.34.0", @@ -22323,7 +18961,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -22335,10 +18973,10 @@ version = "0.33.0" dependencies = [ "parity-scale-codec", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", @@ -22365,12 +19003,12 @@ dependencies = [ "serde_json", "sp-application-crypto 30.0.0", "sp-blockchain", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-genesis-builder 0.8.0", + "sp-genesis-builder", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", @@ -22420,7 +19058,7 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-panic-handler 13.0.0", "sp-runtime 31.0.1", @@ -22451,7 +19089,7 @@ dependencies = [ "sp-externalities 0.25.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-statement-store 10.0.0", + "sp-statement-store", "sp-storage 19.0.0", "sp-test-primitives", "sp-trie 29.0.0", @@ -22536,17 +19174,17 @@ dependencies = [ "sc-telemetry", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-aura 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-aura", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", + "sp-inherents", + "sp-keyring", "sp-keystore 0.34.0", 
"sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -22578,18 +19216,18 @@ dependencies = [ "sc-transaction-pool-api", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-babe", + "sp-consensus-slots", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", + "sp-inherents", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -22602,7 +19240,7 @@ name = "sc-consensus-babe-rpc" version = "0.34.0" dependencies = [ "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", @@ -22615,9 +19253,9 @@ dependencies = [ "sp-application-crypto 30.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe 0.32.0", + "sp-consensus-babe", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "substrate-test-runtime-client", @@ -22652,13 +19290,13 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-beefy 13.0.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-beefy", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", - "sp-mmr-primitives 26.0.0", + "sp-mmr-primitives", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", @@ -22674,7 +19312,7 @@ name = "sc-consensus-beefy-rpc" version = "13.0.0" dependencies = [ "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -22683,7 +19321,7 @@ dependencies = [ "serde", "serde_json", "sp-application-crypto 30.0.0", - 
"sp-consensus-beefy 13.0.0", + "sp-consensus-beefy", "sp-core 28.0.0", "sp-runtime 31.0.1", "substrate-test-runtime-client", @@ -22740,10 +19378,10 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -22759,7 +19397,7 @@ version = "0.19.0" dependencies = [ "finality-grandpa", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "sc-block-builder", @@ -22768,9 +19406,9 @@ dependencies = [ "sc-rpc", "serde", "sp-blockchain", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "substrate-test-runtime-client", "thiserror", @@ -22785,7 +19423,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "sc-basic-authorship", @@ -22800,14 +19438,14 @@ dependencies = [ "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-aura 0.32.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -22828,12 +19466,12 @@ dependencies = [ "sc-client-api", "sc-consensus", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-pow 0.32.0", + "sp-consensus-pow", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "thiserror", @@ -22854,9 +19492,9 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", 
"sp-consensus", - "sp-consensus-slots 0.32.0", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "substrate-test-runtime-client", @@ -22921,31 +19559,7 @@ dependencies = [ "sp-runtime-interface 27.0.0", "sp-trie 35.0.0", "sp-version 35.0.0", - "sp-wasm-interface 21.0.1", - "tracing", -] - -[[package]] -name = "sc-executor" -version = "0.40.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f0cc0a3728fd033589183460c5a49b2e7545d09dc89a098216ef9e9aadcd9dc" -dependencies = [ - "parity-scale-codec", - "parking_lot 0.12.3", - "sc-executor-common 0.35.0", - "sc-executor-polkavm 0.32.0", - "sc-executor-wasmtime 0.35.0", - "schnellru", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-externalities 0.29.0", - "sp-io 38.0.0", - "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime-interface 28.0.0", - "sp-trie 37.0.0", - "sp-version 37.0.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "tracing", ] @@ -22953,7 +19567,7 @@ dependencies = [ name = "sc-executor-common" version = "0.29.0" dependencies = [ - "polkavm 0.18.0", + "polkavm 0.9.3", "sc-allocator 23.0.0", "sp-maybe-compressed-blob 11.0.0", "sp-wasm-interface 20.0.0", @@ -22970,21 +19584,7 @@ dependencies = [ "polkavm 0.9.3", "sc-allocator 28.0.0", "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-wasm-interface 21.0.1", - "thiserror", - "wasm-instrument", -] - -[[package]] -name = "sc-executor-common" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3b703a33dcb7cddf19176fdf12294b9a6408125836b0f4afee3e6969e7f190" -dependencies = [ - "polkavm 0.9.3", - "sc-allocator 29.0.0", - "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "thiserror", "wasm-instrument", ] 
@@ -22994,7 +19594,7 @@ name = "sc-executor-polkavm" version = "0.29.0" dependencies = [ "log", - "polkavm 0.18.0", + "polkavm 0.9.3", "sc-executor-common 0.29.0", "sp-wasm-interface 20.0.0", ] @@ -23008,19 +19608,7 @@ dependencies = [ "log", "polkavm 0.9.3", "sc-executor-common 0.34.0", - "sp-wasm-interface 21.0.1", -] - -[[package]] -name = "sc-executor-polkavm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fe58d9cacfab73e5595fa84b80f7bd03efebe54a0574daaeb221a1d1f7ab80" -dependencies = [ - "log", - "polkavm 0.9.3", - "sc-executor-common 0.35.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", ] [[package]] @@ -23062,26 +19650,7 @@ dependencies = [ "sc-allocator 28.0.0", "sc-executor-common 0.34.0", "sp-runtime-interface 27.0.0", - "sp-wasm-interface 21.0.1", - "wasmtime", -] - -[[package]] -name = "sc-executor-wasmtime" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd498f2f77ec1f861c30804f5bfd796d4afcc8ce44ea1f11bfbe2847551d161" -dependencies = [ - "anyhow", - "cfg-if", - "libc", - "log", - "parking_lot 0.12.3", - "rustix 0.36.15", - "sc-allocator 29.0.0", - "sc-executor-common 0.35.0", - "sp-runtime-interface 28.0.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "wasmtime", ] @@ -23138,7 +19707,7 @@ dependencies = [ "sp-consensus", "sp-core 28.0.0", "sp-keystore 0.34.0", - "sp-mixnet 0.4.0", + "sp-mixnet", "sp-runtime 31.0.1", "thiserror", ] @@ -23151,7 +19720,7 @@ dependencies = [ "assert_matches", "async-channel 1.9.0", "async-trait", - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "cid 0.9.0", "criterion", @@ -23160,7 +19729,7 @@ dependencies = [ "futures", "futures-timer", "ip_network", - "libp2p 0.54.1", + "libp2p", "linked_hash_set", "litep2p", "log", @@ -23222,7 +19791,7 @@ dependencies = [ "sc-consensus", "sc-network-types", "sp-consensus", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", 
"sp-runtime 31.0.1", "tempfile", ] @@ -23285,7 +19854,7 @@ dependencies = [ "sc-network-types", "sp-consensus", "sp-runtime 31.0.1", - "sp-statement-store 10.0.0", + "sp-statement-store", "substrate-prometheus-endpoint", ] @@ -23317,7 +19886,7 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-test-primitives", @@ -23336,7 +19905,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p 0.54.1", + "libp2p", "log", "parking_lot 0.12.3", "rand", @@ -23383,10 +19952,8 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58", - "bytes", "ed25519-dalek", "libp2p-identity", - "libp2p-kad", "litep2p", "log", "multiaddr 0.18.1", @@ -23417,7 +19984,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "rand", - "rustls 0.23.18", + "rustls 0.23.14", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -23432,7 +19999,7 @@ dependencies = [ "sp-core 28.0.0", "sp-externalities 0.25.0", "sp-keystore 0.34.0", - "sp-offchain 26.0.0", + "sp-offchain", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-test-runtime-client", @@ -23455,7 +20022,7 @@ version = "29.0.0" dependencies = [ "assert_matches", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -23479,11 +20046,11 @@ dependencies = [ "sp-crypto-hashing 0.1.0", "sp-io 30.0.0", "sp-keystore 0.34.0", - "sp-offchain 26.0.0", + "sp-offchain", "sp-rpc", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-statement-store 10.0.0", + "sp-session", + "sp-statement-store", "sp-version 29.0.0", "substrate-test-runtime-client", "tokio", @@ -23493,7 +20060,7 @@ dependencies = [ name = "sc-rpc-api" version = "0.33.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "parity-scale-codec", "sc-chain-spec", "sc-mixnet", @@ -23520,7 +20087,7 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "ip_network", - 
"jsonrpsee", + "jsonrpsee 0.24.3", "log", "sc-rpc-api", "serde", @@ -23537,12 +20104,10 @@ version = "0.34.0" dependencies = [ "array-bytes", "assert_matches", - "async-trait", "futures", "futures-util", "hex", - "itertools 0.11.0", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -23584,26 +20149,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder 17.0.0", -] - -[[package]] -name = "sc-runtime-utilities" -version = "0.1.0" -dependencies = [ - "cumulus-primitives-proof-size-hostfunction 0.2.0", - "cumulus-test-runtime", - "parity-scale-codec", - "sc-executor 0.32.0", - "sc-executor-common 0.29.0", - "sp-core 28.0.0", - "sp-crypto-hashing 0.1.0", - "sp-io 30.0.0", - "sp-state-machine 0.35.0", - "sp-version 29.0.0", - "sp-wasm-interface 20.0.0", - "subxt", - "thiserror", + "substrate-wasm-builder", ] [[package]] @@ -23615,7 +20161,7 @@ dependencies = [ "exit-future", "futures", "futures-timer", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -23653,11 +20199,11 @@ dependencies = [ "sp-externalities 0.25.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-state-machine 0.35.0", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", - "sp-transaction-storage-proof 26.0.0", + "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie 29.0.0", "sp-version 29.0.0", "static_init", @@ -23730,7 +20276,7 @@ dependencies = [ "sp-blockchain", "sp-core 28.0.0", "sp-runtime 31.0.1", - "sp-statement-store 10.0.0", + "sp-statement-store", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "tempfile", @@ -23753,7 +20299,7 @@ dependencies = [ name = "sc-sync-state-rpc" version = "0.34.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "parity-scale-codec", "sc-chain-spec", "sc-client-api", @@ -23771,7 +20317,7 @@ dependencies = [ name = "sc-sysinfo" version = "27.0.0" dependencies = [ - 
"derive_more 0.99.17", + "derive_more", "futures", "libc", "log", @@ -23794,7 +20340,7 @@ version = "15.0.0" dependencies = [ "chrono", "futures", - "libp2p 0.54.1", + "libp2p", "log", "parking_lot 0.12.3", "pin-project", @@ -23856,7 +20402,7 @@ dependencies = [ "criterion", "futures", "futures-timer", - "indexmap 2.7.0", + "indexmap 2.2.3", "itertools 0.11.0", "linked-hash-map", "log", @@ -23874,7 +20420,7 @@ dependencies = [ "sp-crypto-hashing 0.1.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -23932,22 +20478,9 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" dependencies = [ - "derive_more 0.99.17", - "parity-scale-codec", - "scale-bits", - "scale-type-resolver", - "smallvec", -] - -[[package]] -name = "scale-decode" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ae9cc099ae85ff28820210732b00f019546f36f33225f509fe25d5816864a0" -dependencies = [ - "derive_more 1.0.0", + "derive_more", "parity-scale-codec", - "primitive-types 0.13.1", + "primitive-types 0.12.2", "scale-bits", "scale-decode-derive", "scale-type-resolver", @@ -23956,25 +20489,25 @@ dependencies = [ [[package]] name = "scale-decode-derive" -version = "0.14.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ed9401effa946b493f9f84dc03714cca98119b230497df6f3df6b84a2b03648" +checksum = "9bb22f574168103cdd3133b19281639ca65ad985e24612728f727339dcaf4021" dependencies = [ - "darling", + "darling 0.14.4", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] name = "scale-encode" -version = "0.8.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f9271284d05d0749c40771c46180ce89905fd95aa72a2a2fddb4b7c0aa424db" +checksum = "4ba0b9c48dc0eb20c60b083c29447c0c4617cb7c4a4c9fef72aa5c5bc539e15e" dependencies = [ - "derive_more 1.0.0", + "derive_more", "parity-scale-codec", - "primitive-types 0.13.1", + "primitive-types 0.12.2", "scale-bits", "scale-encode-derive", "scale-type-resolver", @@ -23983,26 +20516,26 @@ dependencies = [ [[package]] name = "scale-encode-derive" -version = "0.8.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102fbc6236de6c53906c0b262f12c7aa69c2bdc604862c12728f5f4d370bc137" +checksum = "82ab7e60e2d9c8d47105f44527b26f04418e5e624ffc034f6b4a86c0ba19c5bf" dependencies = [ - "darling", - "proc-macro-crate 3.1.0", + "darling 0.14.4", + "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] name = "scale-info" -version = "2.11.6" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", "cfg-if", - "derive_more 1.0.0", + "derive_more", "parity-scale-codec", "scale-info-derive", "serde", @@ -24010,14 +20543,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.6" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] @@ -24032,9 +20565,9 @@ dependencies = [ [[package]] name = "scale-typegen" -version = "0.9.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0dc4c70c7fea2eef1740f0081d3fe385d8bee1eef11e9272d3bec7dc8e5438e0" +checksum = "498d1aecf2ea61325d4511787c115791639c0fd21ef4f8e11e49dd09eff2bbac" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -24045,17 +20578,18 @@ dependencies = [ [[package]] name = "scale-value" -version = "0.17.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e0ef2a0ee1e02a69ada37feb87ea1616ce9808aca072befe2d3131bf28576e" +checksum = "ba4d772cfb7569e03868400344a1695d16560bf62b86b918604773607d39ec84" dependencies = [ "base58", "blake2 0.10.6", - "derive_more 1.0.0", + "derive_more", "either", + "frame-metadata 15.1.0", "parity-scale-codec", "scale-bits", - "scale-decode 0.14.0", + "scale-decode", "scale-encode", "scale-info", "scale-type-resolver", @@ -24160,18 +20694,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" -[[package]] -name = "scrypt" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" -dependencies = [ - "password-hash", - "pbkdf2", - "salsa20", - "sha2 0.10.8", -] - [[package]] name = "sct" version = "0.7.0" @@ -24212,18 +20734,7 @@ version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" -dependencies = [ - "bitcoin_hashes 0.14.0", - "rand", - "secp256k1-sys 0.10.1", + "secp256k1-sys", ] [[package]] @@ -24235,15 +20746,6 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.10.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -24254,15 +20756,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secrecy" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" -dependencies = [ - "zeroize", -] - [[package]] name = "security-framework" version = "2.11.0" @@ -24344,6 +20837,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "separator" version = "0.4.1" @@ -24424,7 +20923,7 @@ version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.2.3", "itoa", "memchr", "ryu", @@ -24458,7 +20957,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -24540,19 +21039,9 @@ name = "sha3" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sha3-asm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" 
-dependencies = [ - "cc", - "cfg-if", +dependencies = [ + "digest 0.10.7", + "keccak", ] [[package]] @@ -24669,18 +21158,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "slot-range-helper" -version = "15.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e34f1146a457a5c554dedeae6c7273aa54c3b031f3e9eb0abd037b5511e2ce9" -dependencies = [ - "enumn", - "parity-scale-codec", - "paste", - "sp-runtime 39.0.2", -] - [[package]] name = "slotmap" version = "1.0.6" @@ -24765,7 +21242,7 @@ dependencies = [ "bs58", "chacha20", "crossbeam-queue", - "derive_more 0.99.17", + "derive_more", "ed25519-zebra 4.0.3", "either", "event-listener 2.5.3", @@ -24806,33 +21283,34 @@ dependencies = [ [[package]] name = "smoldot" -version = "0.18.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "966e72d77a3b2171bb7461d0cb91f43670c63558c62d7cf42809cae6c8b6b818" +checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" dependencies = [ "arrayvec 0.7.4", "async-lock 3.4.0", "atomic-take", - "base64 0.22.1", + "base64 0.21.7", "bip39", "blake2-rfc", "bs58", "chacha20", "crossbeam-queue", - "derive_more 0.99.17", + "derive_more", "ed25519-zebra 4.0.3", "either", - "event-listener 5.3.1", + "event-listener 4.0.3", "fnv", "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.5", "hex", "hmac 0.12.1", - "itertools 0.13.0", + "itertools 0.12.1", "libm", "libsecp256k1", "merlin", + "no-std-net", "nom", "num-bigint", "num-rational", @@ -24842,7 +21320,7 @@ dependencies = [ "poly1305", "rand", "rand_chacha", - "ruzstd 0.6.0", + "ruzstd 0.5.0", "schnorrkel 0.11.4", "serde", "serde_json", @@ -24851,9 +21329,9 @@ dependencies = [ "siphasher 1.0.1", "slab", "smallvec", - "soketto 0.8.0", + "soketto 0.7.1", "twox-hash", - "wasmi 0.32.3", + "wasmi 0.31.2", "x25519-dalek", "zeroize", ] @@ -24868,7 +21346,7 @@ dependencies = [ "async-lock 2.8.0", "base64 0.21.7", "blake2-rfc", - "derive_more 
0.99.17", + "derive_more", "either", "event-listener 2.5.3", "fnv", @@ -24896,27 +21374,27 @@ dependencies = [ [[package]] name = "smoldot-light" -version = "0.16.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a33b06891f687909632ce6a4e3fd7677b24df930365af3d0bcb078310129f3f" +checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" dependencies = [ "async-channel 2.3.0", "async-lock 3.4.0", - "base64 0.22.1", + "base64 0.21.7", "blake2-rfc", - "bs58", - "derive_more 0.99.17", + "derive_more", "either", - "event-listener 5.3.1", + "event-listener 4.0.3", "fnv", "futures-channel", "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.5", "hex", - "itertools 0.13.0", + "itertools 0.12.1", "log", "lru 0.12.3", + "no-std-net", "parking_lot 0.12.3", "pin-project", "rand", @@ -24926,7 +21404,7 @@ dependencies = [ "siphasher 1.0.1", "slab", "smol 2.0.2", - "smoldot 0.18.0", + "smoldot 0.16.0", "zeroize", ] @@ -24947,7 +21425,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.1.3", "rand_core 0.6.4", - "ring 0.17.8", + "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.8", "subtle 2.5.0", @@ -24968,14 +21446,14 @@ name = "snowbridge-beacon-primitives" version = "0.2.0" dependencies = [ "byte-slice-cast", - "frame-support 28.0.0", + "frame-support", "hex", "hex-literal", "parity-scale-codec", "rlp 0.6.1", "scale-info", "serde", - "snowbridge-ethereum 0.3.0", + "snowbridge-ethereum", "snowbridge-milagro-bls", "sp-core 28.0.0", "sp-io 30.0.0", @@ -24985,84 +21463,37 @@ dependencies = [ "ssz_rs_derive", ] -[[package]] -name = "snowbridge-beacon-primitives" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10bd720997e558beb556d354238fa90781deb38241cf31c1b6368738ef21c279" -dependencies = [ - "byte-slice-cast", - "frame-support 38.0.0", - "hex", - "parity-scale-codec", - "rlp 0.5.2", - "scale-info", - "serde", - "snowbridge-ethereum 0.9.0", - 
"snowbridge-milagro-bls", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ssz_rs", - "ssz_rs_derive", -] - [[package]] name = "snowbridge-core" version = "0.2.0" dependencies = [ - "ethabi-decode 2.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "ethabi-decode", + "frame-support", + "frame-system", "hex", "hex-literal", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", + "polkadot-parachain-primitives", "scale-info", "serde", - "snowbridge-beacon-primitives 0.2.0", + "snowbridge-beacon-primitives", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-core" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6be61e4db95d1e253a1d5e722953b2d2f6605e5f9761f0a919e5d3fbdbff9da9" -dependencies = [ - "ethabi-decode 1.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "hex-literal", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "scale-info", - "serde", - "snowbridge-beacon-primitives 0.10.0", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "snowbridge-ethereum" version = "0.3.0" dependencies = [ - "ethabi-decode 2.0.0", - "ethbloom 0.14.1", - "ethereum-types 0.15.1", + "ethabi-decode", + "ethbloom", + "ethereum-types", "hex-literal", "parity-bytes", "parity-scale-codec", @@ -25078,27 +21509,6 @@ dependencies = [ "wasm-bindgen-test", ] -[[package]] -name = "snowbridge-ethereum" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "dc3d6d549c57df27cf89ec852f932fa4008eea877a6911a87e03e8002104eabd" -dependencies = [ - "ethabi-decode 1.0.0", - "ethbloom 0.13.0", - "ethereum-types 0.14.1", - "hex-literal", - "parity-bytes", - "parity-scale-codec", - "rlp 0.5.2", - "scale-info", - "serde", - "serde-big-array", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "snowbridge-milagro-bls" version = "1.5.4" @@ -25129,174 +21539,83 @@ dependencies = [ "sp-tracing 16.0.0", ] -[[package]] -name = "snowbridge-outbound-queue-merkle-tree" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c6a9b65fa61711b704f0c6afb3663c6288288e8822ddae5cc1146fe3ad9ce8" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "snowbridge-outbound-queue-runtime-api" version = "0.2.0" dependencies = [ - "frame-support 28.0.0", + "frame-support", "parity-scale-codec", - "snowbridge-core 0.2.0", - "snowbridge-outbound-queue-merkle-tree 0.3.0", + "snowbridge-core", + "snowbridge-outbound-queue-merkle-tree", "sp-api 26.0.0", "sp-std 14.0.0", ] -[[package]] -name = "snowbridge-outbound-queue-runtime-api" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d27b8d9cb8022637a5ce4f52692520fa75874f393e04ef5cd75bd8795087f6" -dependencies = [ - "frame-support 38.0.0", - "parity-scale-codec", - "snowbridge-core 0.10.0", - "snowbridge-outbound-queue-merkle-tree 0.9.1", - "sp-api 34.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "snowbridge-pallet-ethereum-client" version = "0.2.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "hex-literal", "log", - "pallet-timestamp 27.0.0", + "pallet-timestamp", 
"parity-scale-codec", "rand", "scale-info", "serde", "serde_json", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-ethereum 0.3.0", - "snowbridge-pallet-ethereum-client-fixtures 0.9.0", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "snowbridge-pallet-ethereum-client-fixtures", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-std 14.0.0", "static_assertions", ] -[[package]] -name = "snowbridge-pallet-ethereum-client" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d53d32d8470c643f9f8c1f508e1e34263f76297e4c9150e10e8f2e0b63992e1" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-timestamp 37.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "snowbridge-beacon-primitives 0.10.0", - "snowbridge-core 0.10.0", - "snowbridge-ethereum 0.9.0", - "snowbridge-pallet-ethereum-client-fixtures 0.18.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "static_assertions", -] - [[package]] name = "snowbridge-pallet-ethereum-client-fixtures" version = "0.9.0" dependencies = [ "hex-literal", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", + "snowbridge-beacon-primitives", + "snowbridge-core", "sp-core 28.0.0", "sp-std 14.0.0", ] -[[package]] -name = "snowbridge-pallet-ethereum-client-fixtures" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3984b98465af1d862d4e87ba783e1731f2a3f851b148d6cb98d526cebd351185" -dependencies = [ - "hex-literal", - "snowbridge-beacon-primitives 0.10.0", - "snowbridge-core 0.10.0", - "sp-core 34.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "snowbridge-pallet-inbound-queue" version = "0.2.0" 
dependencies = [ - "alloy-core", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "alloy-primitives", + "alloy-sol-types", + "frame-benchmarking", + "frame-support", + "frame-system", "hex-literal", "log", - "pallet-balances 28.0.0", + "pallet-balances", "parity-scale-codec", "scale-info", "serde", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-inbound-queue-fixtures 0.10.0", - "snowbridge-router-primitives 0.9.0", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-router-primitives", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-pallet-inbound-queue" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2e6a9d00e60e3744e6b6f0c21fea6694b9c6401ac40e41340a96e561dcf1935" -dependencies = [ - "alloy-primitives 0.4.2", - "alloy-sol-types 0.4.2", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "pallet-balances 39.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "snowbridge-beacon-primitives 0.10.0", - "snowbridge-core 0.10.0", - "snowbridge-pallet-inbound-queue-fixtures 0.18.0", - "snowbridge-router-primitives 0.16.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] @@ -25304,272 +21623,133 @@ name = "snowbridge-pallet-inbound-queue-fixtures" version = "0.10.0" dependencies = [ "hex-literal", - "snowbridge-beacon-primitives 0.2.0", - "snowbridge-core 0.2.0", + "snowbridge-beacon-primitives", + 
"snowbridge-core", "sp-core 28.0.0", "sp-std 14.0.0", ] -[[package]] -name = "snowbridge-pallet-inbound-queue-fixtures" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b099db83f4c10c0bf84e87deb1596019f91411ea1c8c9733ea9a7f2e7e967073" -dependencies = [ - "hex-literal", - "snowbridge-beacon-primitives 0.10.0", - "snowbridge-core 0.10.0", - "sp-core 34.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "snowbridge-pallet-outbound-queue" version = "0.2.0" dependencies = [ - "bridge-hub-common 0.1.0", - "ethabi-decode 2.0.0", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-message-queue 31.0.0", + "bridge-hub-common", + "ethabi-decode", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-message-queue", "parity-scale-codec", "scale-info", "serde", - "snowbridge-core 0.2.0", - "snowbridge-outbound-queue-merkle-tree 0.3.0", + "snowbridge-core", + "snowbridge-outbound-queue-merkle-tree", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-std 14.0.0", ] -[[package]] -name = "snowbridge-pallet-outbound-queue" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d49478041b6512c710d0d4655675d146fe00a8e0c1624e5d8a1d6c161d490f" -dependencies = [ - "bridge-hub-common 0.10.0", - "ethabi-decode 1.0.0", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "serde", - "snowbridge-core 0.10.0", - "snowbridge-outbound-queue-merkle-tree 0.9.1", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "snowbridge-pallet-system" version = "0.2.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-support 
28.0.0", - "frame-system 28.0.0", + "frame-benchmarking", + "frame-support", + "frame-system", "hex", "hex-literal", "log", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", + "pallet-balances", + "pallet-message-queue", "parity-scale-codec", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "scale-info", - "snowbridge-core 0.2.0", - "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-core", + "snowbridge-pallet-outbound-queue", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-pallet-system" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "674db59b3c8013382e5c07243ad9439b64d81d2e8b3c4f08d752b55aa5de697e" -dependencies = [ - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "log", - "parity-scale-codec", - "scale-info", - "snowbridge-core 0.10.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "snowbridge-router-primitives" version = "0.9.0" dependencies = [ - "frame-support 28.0.0", - "hex-literal", - "log", - "parity-scale-codec", - "scale-info", - "snowbridge-core 0.2.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", - "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-router-primitives" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025f1e6805753821b1db539369f1fb183fd59fd5df7023f7633a4c0cfd3e62f9" -dependencies = [ - "frame-support 38.0.0", + "frame-support", "hex-literal", "log", "parity-scale-codec", "scale-info", - "snowbridge-core 0.10.0", - "sp-core 34.0.0", - "sp-io 
38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", -] - -[[package]] -name = "snowbridge-runtime-common" -version = "0.2.0" -dependencies = [ - "frame-support 28.0.0", - "log", - "parity-scale-codec", - "snowbridge-core 0.2.0", - "sp-arithmetic 23.0.0", - "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-runtime-common" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093f0e73d6cfdd2eea8712155d1d75b5063fc9b1d854d2665b097b4bb29570d" -dependencies = [ - "frame-support 38.0.0", - "log", - "parity-scale-codec", - "snowbridge-core 0.10.0", - "sp-arithmetic 26.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - "staging-xcm-executor 17.0.0", -] - -[[package]] -name = "snowbridge-runtime-test-common" -version = "0.2.0" -dependencies = [ - "cumulus-pallet-parachain-system 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "pallet-balances 28.0.0", - "pallet-collator-selection 9.0.0", - "pallet-message-queue 31.0.0", - "pallet-session 28.0.0", - "pallet-timestamp 27.0.0", - "pallet-utility 28.0.0", - "pallet-xcm 7.0.0", - "parachains-runtimes-test-utils 7.0.0", - "parity-scale-codec", - "snowbridge-core 0.2.0", - "snowbridge-pallet-ethereum-client 0.2.0", - "snowbridge-pallet-ethereum-client-fixtures 0.9.0", - "snowbridge-pallet-outbound-queue 0.2.0", - "snowbridge-pallet-system 0.2.0", + "snowbridge-core", "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-runtime 31.0.1", - "staging-parachain-info 0.7.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "snowbridge-runtime-test-common" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"893480d6cde2489051c65efb5d27fa87efe047b3b61216d8e27bb2f0509b7faf" -dependencies = [ - "cumulus-pallet-parachain-system 0.17.1", - "frame-support 38.0.0", - "frame-system 38.0.0", - "pallet-balances 39.0.0", - "pallet-collator-selection 19.0.0", - "pallet-message-queue 41.0.1", - "pallet-session 38.0.0", - "pallet-timestamp 37.0.0", - "pallet-utility 38.0.0", - "pallet-xcm 17.0.0", - "parachains-runtimes-test-utils 17.0.0", - "parity-scale-codec", - "snowbridge-core 0.10.0", - "snowbridge-pallet-ethereum-client 0.10.0", - "snowbridge-pallet-ethereum-client-fixtures 0.18.0", - "snowbridge-pallet-outbound-queue 0.10.0", - "snowbridge-pallet-system 0.10.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-keyring 39.0.0", - "sp-runtime 39.0.2", - "staging-parachain-info 0.17.0", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-std 14.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] -name = "snowbridge-system-runtime-api" +name = "snowbridge-runtime-common" version = "0.2.0" dependencies = [ + "frame-support", + "log", "parity-scale-codec", - "snowbridge-core 0.2.0", - "sp-api 26.0.0", + "snowbridge-core", + "sp-arithmetic 23.0.0", "sp-std 14.0.0", - "staging-xcm 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", +] + +[[package]] +name = "snowbridge-runtime-test-common" +version = "0.2.0" +dependencies = [ + "cumulus-pallet-parachain-system", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-session", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parachains-runtimes-test-utils", + "parity-scale-codec", + "snowbridge-core", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-ethereum-client-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keyring", + "sp-runtime 31.0.1", + "staging-parachain-info", + 
"staging-xcm", + "staging-xcm-executor", ] [[package]] name = "snowbridge-system-runtime-api" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b8b83b3db781c49844312a23965073e4d93341739a35eafe526c53b578d3b7" +version = "0.2.0" dependencies = [ "parity-scale-codec", - "snowbridge-core 0.10.0", - "sp-api 34.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", + "snowbridge-core", + "sp-api 26.0.0", + "sp-std 14.0.0", + "staging-xcm", ] [[package]] @@ -25629,11 +21809,11 @@ version = "0.0.0" dependencies = [ "clap 4.5.13", "frame-benchmarking-cli", - "frame-metadata-hash-extension 0.1.0", - "frame-system 28.0.0", + "frame-metadata-hash-extension", + "frame-system", "futures", - "jsonrpsee", - "pallet-transaction-payment 28.0.0", + "jsonrpsee 0.24.3", + "pallet-transaction-payment", "pallet-transaction-payment-rpc", "sc-basic-authorship", "sc-cli", @@ -25651,17 +21831,17 @@ dependencies = [ "serde_json", "solochain-template-runtime", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", - "sp-consensus-aura 0.32.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-aura", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", + "sp-timestamp", "substrate-build-script-utils", "substrate-frame-rpc-system", ] @@ -25670,40 +21850,40 @@ dependencies = [ name = "solochain-template-runtime" version = "0.0.0" dependencies = [ - "frame-benchmarking 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", - "pallet-aura 27.0.0", - "pallet-balances 28.0.0", - "pallet-grandpa 28.0.0", - 
"pallet-sudo 28.0.0", + "frame-benchmarking", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-balances", + "pallet-grandpa", + "pallet-sudo", "pallet-template", - "pallet-timestamp 27.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "scale-info", "serde_json", "sp-api 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-aura 0.32.0", - "sp-consensus-grandpa 13.0.0", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-storage 19.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -25751,29 +21931,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-api" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbce492e0482134128b7729ea36f5ef1a9f9b4de2d48ff8dde7b5e464e28ce75" -dependencies = [ - "docify", - "hash-db", - "log", - "parity-scale-codec", - "scale-info", - "sp-api-proc-macro 20.0.0", - "sp-core 34.0.0", - "sp-externalities 0.29.0", - "sp-metadata-ir 0.7.0", - "sp-runtime 39.0.2", - "sp-runtime-interface 28.0.0", - "sp-state-machine 0.43.0", - "sp-trie 37.0.0", - "sp-version 37.0.0", - "thiserror", -] - [[package]] name = "sp-api-proc-macro" version = "15.0.0" @@ -25803,21 +21960,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "sp-api-proc-macro" -version = "20.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9aadf9e97e694f0e343978aa632938c5de309cbcc8afed4136cb71596737278" -dependencies = [ - "Inflector", - "blake2 0.10.6", - "expander", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "sp-api-test" version = "2.0.1" @@ -25832,7 +21974,6 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", - "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", @@ -25855,43 +21996,44 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "35.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57541120624a76379cc993cbb85064a5148957a92da032567e54bce7977f51fc" +checksum = "13ca6121c22c8bd3d1dce1f05c479101fd0d7b159bef2a3e8c834138d839c75c" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 32.0.0", - "sp-io 35.0.0", + "sp-core 31.0.0", + "sp-io 33.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "sp-application-crypto" -version = "36.0.0" +version = "35.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "296282f718f15d4d812664415942665302a484d3495cf8d2e2ab3192b32d2c73" +checksum = "57541120624a76379cc993cbb85064a5148957a92da032567e54bce7977f51fc" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 33.0.1", - "sp-io 36.0.0", + "sp-core 32.0.0", + "sp-io 35.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "sp-application-crypto" -version = "38.0.0" +version = "36.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d8133012faa5f75b2f0b1619d9f720c1424ac477152c143e5f7dbde2fe1a958" +checksum = "296282f718f15d4d812664415942665302a484d3495cf8d2e2ab3192b32d2c73" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 34.0.0", 
- "sp-io 38.0.0", + "sp-core 33.0.1", + "sp-io 36.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -25922,6 +22064,21 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "sp-arithmetic" +version = "25.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "910c07fa263b20bf7271fdd4adcb5d3217dfdac14270592e0780223542e7e114" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions", +] + [[package]] name = "sp-arithmetic" version = "26.0.0" @@ -25978,39 +22135,15 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-authority-discovery" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "519c33af0e25ba2dd2eb3790dc404d634b6e4ce0801bcc8fa3574e07c365e734" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-block-builder" version = "26.0.0" dependencies = [ "sp-api 26.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", ] -[[package]] -name = "sp-block-builder" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74738809461e3d4bd707b5b94e0e0c064a623a74a6a8fe5c98514417a02858dd" -dependencies = [ - "sp-api 34.0.0", - "sp-inherents 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-blockchain" version = "28.0.0" @@ -26037,7 +22170,7 @@ dependencies = [ "futures", "log", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-test-primitives", @@ -26053,27 +22186,10 @@ dependencies = [ "scale-info", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots 0.32.0", - "sp-inherents 26.0.0", + "sp-consensus-slots", + "sp-inherents", "sp-runtime 
31.0.1", - "sp-timestamp 26.0.0", -] - -[[package]] -name = "sp-consensus-aura" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8faaa05bbcb9c41f0cc535c4c1315abf6df472b53eae018678d1b4d811ac47" -dependencies = [ - "async-trait", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-consensus-slots 0.40.1", - "sp-inherents 34.0.0", - "sp-runtime 39.0.2", - "sp-timestamp 34.0.0", + "sp-timestamp", ] [[package]] @@ -26086,30 +22202,11 @@ dependencies = [ "serde", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-slots", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", - "sp-timestamp 26.0.0", -] - -[[package]] -name = "sp-consensus-babe" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ee95e17ee8dcd14db7d584b899a426565ca9abe5a266ab82277977fc547f86" -dependencies = [ - "async-trait", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-consensus-slots 0.40.1", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-runtime 39.0.2", - "sp-timestamp 34.0.0", + "sp-timestamp", ] [[package]] @@ -26126,35 +22223,13 @@ dependencies = [ "sp-crypto-hashing 0.1.0", "sp-io 30.0.0", "sp-keystore 0.34.0", - "sp-mmr-primitives 26.0.0", + "sp-mmr-primitives", "sp-runtime 31.0.1", "sp-weights 27.0.0", "strum 0.26.3", "w3f-bls", ] -[[package]] -name = "sp-consensus-beefy" -version = "22.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d97e8cd75d85d15cda6f1923cf3834e848f80d5a6de1cf4edbbc5f0ad607eb" -dependencies = [ - "lazy_static", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-core 34.0.0", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-io 38.0.0", - "sp-keystore 
0.40.0", - "sp-mmr-primitives 34.1.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", - "strum 0.26.3", -] - [[package]] name = "sp-consensus-grandpa" version = "13.0.0" @@ -26171,24 +22246,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-consensus-grandpa" -version = "21.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "587b791efe6c5f18e09dbbaf1ece0ee7b5fe51602c233e7151a3676b0de0260b" -dependencies = [ - "finality-grandpa", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-core 34.0.0", - "sp-keystore 0.40.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-consensus-pow" version = "0.32.0" @@ -26199,18 +22256,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-consensus-pow" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa6b7d199a1c16cea1b74ee7cee174bf08f2120ab66a87bee7b12353100b47c" -dependencies = [ - "parity-scale-codec", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-consensus-sassafras" version = "0.3.4-dev" @@ -26220,7 +22265,7 @@ dependencies = [ "serde", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots 0.32.0", + "sp-consensus-slots", "sp-core 28.0.0", "sp-runtime 31.0.1", ] @@ -26232,19 +22277,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-timestamp 26.0.0", -] - -[[package]] -name = "sp-consensus-slots" -version = "0.40.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbafb7ed44f51c22fa277fb39b33dc601fa426133a8e2b53f3f46b10f07fba43" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-timestamp 34.0.0", + "sp-timestamp", ] [[package]] @@ -26278,8 +22311,8 @@ dependencies = [ "regex", "scale-info", "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", + "secp256k1", + "secrecy", "serde", "serde_json", 
"sp-crypto-hashing 0.1.0", @@ -26326,8 +22359,8 @@ dependencies = [ "rand", "scale-info", "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", + "secp256k1", + "secrecy", "serde", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -26373,8 +22406,8 @@ dependencies = [ "rand", "scale-info", "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", + "secp256k1", + "secrecy", "serde", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -26420,8 +22453,8 @@ dependencies = [ "rand", "scale-info", "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", + "secp256k1", + "secrecy", "serde", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -26437,53 +22470,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "sp-core" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c961a5e33fb2962fa775c044ceba43df9c6f917e2c35d63bfe23738468fa76a7" -dependencies = [ - "array-bytes", - "bitflags 1.3.2", - "blake2 0.10.6", - "bounded-collections", - "bs58", - "dyn-clonable", - "ed25519-zebra 4.0.3", - "futures", - "hash-db", - "hash256-std-hasher", - "impl-serde 0.4.0", - "itertools 0.11.0", - "k256", - "libsecp256k1", - "log", - "merlin", - "parity-bip39", - "parity-scale-codec", - "parking_lot 0.12.3", - "paste", - "primitive-types 0.12.2", - "rand", - "scale-info", - "schnorrkel 0.11.4", - "secp256k1 0.28.2", - "secrecy 0.8.0", - "serde", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.29.0", - "sp-runtime-interface 28.0.0", - 
"sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 21.0.0", - "ss58-registry", - "substrate-bip39 0.6.0", - "thiserror", - "tracing", - "w3f-bls", - "zeroize", -] - [[package]] name = "sp-core-fuzz" version = "0.0.0" @@ -26500,15 +22486,6 @@ dependencies = [ "sp-crypto-hashing 0.1.0", ] -[[package]] -name = "sp-core-hashing" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f812cb2dff962eb378c507612a50f1c59f52d92eb97b710f35be3c2346a3cd7" -dependencies = [ - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "sp-core-hashing-proc-macro" version = "15.0.0" @@ -26519,7 +22496,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -26556,27 +22533,6 @@ dependencies = [ "sp-runtime-interface 24.0.0", ] -[[package]] -name = "sp-crypto-ec-utils" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acb24f8a607a48a87f0ee4c090fc5d577eee49ff39ced6a3c491e06eca03c37" -dependencies = [ - "ark-bls12-377", - "ark-bls12-377-ext", - "ark-bls12-381", - "ark-bls12-381-ext", - "ark-bw6-761", - "ark-bw6-761-ext", - "ark-ec", - "ark-ed-on-bls12-377", - "ark-ed-on-bls12-377-ext", - "ark-ed-on-bls12-381-bandersnatch", - "ark-ed-on-bls12-381-bandersnatch-ext", - "ark-scale 0.0.12", - "sp-runtime-interface 28.0.0", -] - [[package]] name = "sp-crypto-hashing" version = "0.1.0" @@ -26687,30 +22643,19 @@ dependencies = [ name = "sp-externalities" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d6a4572eadd4a63cff92509a210bf425501a0c5e76574b30a366ac77653787" -dependencies = [ - 
"environmental", - "parity-scale-codec", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 20.0.0", -] - -[[package]] -name = "sp-externalities" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33abaec4be69b1613796bbf430decbbcaaf978756379e2016e683a4d6379cd02" +checksum = "a1d6a4572eadd4a63cff92509a210bf425501a0c5e76574b30a366ac77653787" dependencies = [ "environmental", "parity-scale-codec", - "sp-storage 21.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 20.0.0", ] [[package]] name = "sp-externalities" -version = "0.29.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a904407d61cb94228c71b55a9d3708e9d6558991f9e83bd42bd91df37a159d30" +checksum = "33abaec4be69b1613796bbf430decbbcaaf978756379e2016e683a4d6379cd02" dependencies = [ "environmental", "parity-scale-codec", @@ -26728,19 +22673,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-genesis-builder" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a646ed222fd86d5680faa4a8967980eb32f644cae6c8523e1c689a6deda3e8" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde_json", - "sp-api 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-inherents" version = "26.0.0" @@ -26754,20 +22686,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-inherents" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afffbddc380d99a90c459ba1554bbbc01d62e892de9f1485af6940b89c4c0d57" -dependencies = [ - "async-trait", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", - "thiserror", -] - [[package]] name = "sp-io" version = "30.0.0" @@ -26778,9 +22696,9 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.18.0", + "polkavm-derive 0.9.1", 
"rustversion", - "secp256k1 0.28.2", + "secp256k1", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", @@ -26795,9 +22713,9 @@ dependencies = [ [[package]] name = "sp-io" -version = "35.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b64ab18a0e29def6511139a8c45a59c14a846105aab6f9cc653523bd3b81f55" +checksum = "3e09bba780b55bd9e67979cd8f654a31e4a6cf45426ff371394a65953d2177f2" dependencies = [ "bytes", "ed25519-dalek", @@ -26806,25 +22724,25 @@ dependencies = [ "parity-scale-codec", "polkavm-derive 0.9.1", "rustversion", - "secp256k1 0.28.2", - "sp-core 32.0.0", + "secp256k1", + "sp-core 31.0.0", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.28.0", - "sp-keystore 0.38.0", - "sp-runtime-interface 27.0.0", - "sp-state-machine 0.40.0", + "sp-externalities 0.27.0", + "sp-keystore 0.37.0", + "sp-runtime-interface 26.0.0", + "sp-state-machine 0.38.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-tracing 17.0.1", - "sp-trie 34.0.0", + "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 32.0.0", "tracing", "tracing-core", ] [[package]] name = "sp-io" -version = "36.0.0" +version = "35.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a31ce27358b73656a09b4933f09a700019d63afa15ede966f7c9893c1d4db5" +checksum = "8b64ab18a0e29def6511139a8c45a59c14a846105aab6f9cc653523bd3b81f55" dependencies = [ "bytes", "ed25519-dalek", @@ -26833,43 +22751,43 @@ dependencies = [ "parity-scale-codec", "polkavm-derive 0.9.1", "rustversion", - "secp256k1 0.28.2", - "sp-core 33.0.1", + "secp256k1", + "sp-core 32.0.0", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-externalities 0.28.0", - "sp-keystore 0.39.0", + "sp-keystore 0.38.0", "sp-runtime-interface 27.0.0", - "sp-state-machine 0.41.0", + "sp-state-machine 0.40.0", 
"sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-tracing 17.0.1", - "sp-trie 35.0.0", + "sp-tracing 17.0.0", + "sp-trie 34.0.0", "tracing", "tracing-core", ] [[package]] name = "sp-io" -version = "38.0.0" +version = "36.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ef7eb561bb4839cc8424ce58c5ea236cbcca83f26fcc0426d8decfe8aa97d4" +checksum = "e7a31ce27358b73656a09b4933f09a700019d63afa15ede966f7c9893c1d4db5" dependencies = [ "bytes", - "docify", "ed25519-dalek", "libsecp256k1", "log", "parity-scale-codec", "polkavm-derive 0.9.1", "rustversion", - "secp256k1 0.28.2", - "sp-core 34.0.0", + "secp256k1", + "sp-core 33.0.1", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.29.0", - "sp-keystore 0.40.0", - "sp-runtime-interface 28.0.0", - "sp-state-machine 0.43.0", - "sp-tracing 17.0.1", - "sp-trie 37.0.0", + "sp-externalities 0.28.0", + "sp-keystore 0.39.0", + "sp-runtime-interface 27.0.0", + "sp-state-machine 0.41.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 17.0.0", + "sp-trie 35.0.0", "tracing", "tracing-core", ] @@ -26883,17 +22801,6 @@ dependencies = [ "strum 0.26.3", ] -[[package]] -name = "sp-keyring" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0e20624277f578b27f44ecfbe2ebc2e908488511ee2c900c5281599f700ab3" -dependencies = [ - "sp-core 34.0.0", - "sp-runtime 39.0.2", - "strum 0.26.3", -] - [[package]] name = "sp-keystore" version = "0.34.0" @@ -26908,38 +22815,38 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e6c7a7abd860a5211a356cf9d5fcabf0eb37d997985e5d722b6b33dcc815528" +checksum = "bdbab8b61bd61d5f8625a0c75753b5d5a23be55d3445419acd42caf59cf6236b" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", - 
"sp-core 32.0.0", - "sp-externalities 0.28.0", + "sp-core 31.0.0", + "sp-externalities 0.27.0", ] [[package]] name = "sp-keystore" -version = "0.39.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a909528663a80829b95d582a20dd4c9acd6e575650dee2bcaf56f4740b305e" +checksum = "4e6c7a7abd860a5211a356cf9d5fcabf0eb37d997985e5d722b6b33dcc815528" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", - "sp-core 33.0.1", + "sp-core 32.0.0", "sp-externalities 0.28.0", ] [[package]] name = "sp-keystore" -version = "0.40.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0248b4d784cb4a01472276928977121fa39d977a5bb24793b6b15e64b046df42" +checksum = "92a909528663a80829b95d582a20dd4c9acd6e575650dee2bcaf56f4740b305e" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", - "sp-core 34.0.0", - "sp-externalities 0.29.0", + "sp-core 33.0.1", + "sp-externalities 0.28.0", ] [[package]] @@ -26964,7 +22871,7 @@ dependencies = [ name = "sp-metadata-ir" version = "0.6.0" dependencies = [ - "frame-metadata 18.0.0", + "frame-metadata 16.0.0", "parity-scale-codec", "scale-info", ] @@ -26990,18 +22897,6 @@ dependencies = [ "sp-application-crypto 30.0.0", ] -[[package]] -name = "sp-mixnet" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0b017dd54823b6e62f9f7171a1df350972e5c6d0bf17e0c2f78680b5c31942" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", -] - [[package]] name = "sp-mmr-primitives" version = "26.0.0" @@ -27019,24 +22914,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-mmr-primitives" -version = "34.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a12dd76e368f1e48144a84b4735218b712f84b3f976970e2f25a29b30440e10" -dependencies = [ - "log", - "parity-scale-codec", - "polkadot-ckb-merkle-mountain-range", - 
"scale-info", - "serde", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime 39.0.2", - "thiserror", -] - [[package]] name = "sp-npos-elections" version = "26.0.0" @@ -27051,20 +22928,6 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "sp-npos-elections" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af922f112c7c1ed199eabe14f12a82ceb75e1adf0804870eccfbcf3399492847" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" @@ -27072,7 +22935,7 @@ dependencies = [ "clap 4.5.13", "honggfuzz", "rand", - "sp-npos-elections 26.0.0", + "sp-npos-elections", "sp-runtime 31.0.1", ] @@ -27085,17 +22948,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-offchain" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d9de237d72ecffd07f90826eef18360208b16d8de939d54e61591fac0fcbf99" -dependencies = [ - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-panic-handler" version = "13.0.0" @@ -27129,7 +22981,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "binary-merkle-tree 13.0.0", + "binary-merkle-tree", "docify", "either", "hash256-std-hasher", @@ -27161,9 +23013,9 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "36.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6b85cb874b78ebb17307a910fc27edf259a0455ac5155d87eaed8754c037e07" +checksum = "ec3cb126971e7db2f0fcf8053dce740684c438c7180cfca1959598230f342c58" dependencies = [ "docify", "either", @@ -27176,45 +23028,44 @@ dependencies = [ "scale-info", "serde", "simple-mermaid 0.1.1", - "sp-application-crypto 35.0.0", - 
"sp-arithmetic 26.0.0", - "sp-core 32.0.0", - "sp-io 35.0.0", + "sp-application-crypto 33.0.0", + "sp-arithmetic 25.0.0", + "sp-core 31.0.0", + "sp-io 33.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-weights 31.0.0", + "sp-weights 30.0.0", ] [[package]] name = "sp-runtime" -version = "37.0.0" +version = "36.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c2a6148bf0ba74999ecfea9b4c1ade544f0663e0baba19630bb7761b2142b19" +checksum = "a6b85cb874b78ebb17307a910fc27edf259a0455ac5155d87eaed8754c037e07" dependencies = [ "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", "log", - "num-traits", "parity-scale-codec", "paste", "rand", "scale-info", "serde", "simple-mermaid 0.1.1", - "sp-application-crypto 36.0.0", + "sp-application-crypto 35.0.0", "sp-arithmetic 26.0.0", - "sp-core 33.0.1", - "sp-io 36.0.0", + "sp-core 32.0.0", + "sp-io 35.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-weights 31.0.0", ] [[package]] name = "sp-runtime" -version = "39.0.2" +version = "37.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658f23be7c79a85581029676a73265c107c5469157e3444c8c640fdbaa8bfed0" +checksum = "1c2a6148bf0ba74999ecfea9b4c1ade544f0663e0baba19630bb7761b2142b19" dependencies = [ "docify", "either", @@ -27228,13 +23079,12 @@ dependencies = [ "scale-info", "serde", "simple-mermaid 0.1.1", - "sp-application-crypto 38.0.0", + "sp-application-crypto 36.0.0", "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", + "sp-core 33.0.1", + "sp-io 36.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-weights 31.0.0", - "tracing", ] [[package]] @@ -27262,7 +23112,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.18.0", + "polkavm-derive 0.9.1", "primitive-types 0.13.1", "rustversion", "sp-core 28.0.0", @@ -27314,35 +23164,15 @@ dependencies = [ 
"sp-runtime-interface-proc-macro 18.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-storage 21.0.0", - "sp-tracing 17.0.1", - "sp-wasm-interface 21.0.1", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface" -version = "28.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985eb981f40c689c6a0012c937b68ed58dabb4341d06f2dfe4dfd5ed72fa4017" -dependencies = [ - "bytes", - "impl-trait-for-tuples", - "parity-scale-codec", - "polkavm-derive 0.9.1", - "primitive-types 0.12.2", - "sp-externalities 0.29.0", - "sp-runtime-interface-proc-macro 18.0.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 21.0.0", - "sp-tracing 17.0.1", - "sp-wasm-interface 21.0.1", + "sp-tracing 17.0.0", + "sp-wasm-interface 21.0.0", "static_assertions", ] [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", @@ -27401,7 +23231,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -27411,7 +23241,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -27424,22 +23254,7 @@ dependencies = [ "sp-core 28.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-staking 26.0.0", -] - -[[package]] -name = "sp-session" -version = "36.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00a3a307fedc423fb8cd2a7726a3bbb99014f1b4b52f26153993e2aae3338fe6" -dependencies = [ - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-core 34.0.0", - "sp-keystore 
0.40.0", - "sp-runtime 39.0.2", - "sp-staking 36.0.0", + "sp-staking", ] [[package]] @@ -27454,34 +23269,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-staking" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143a764cacbab58347d8b2fd4c8909031fb0888d7b02a0ec9fa44f81f780d732" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - -[[package]] -name = "sp-staking" -version = "36.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a73eedb4b85f4cd420d31764827546aa22f82ce1646d0fd258993d051de7a90" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-state-machine" version = "0.35.0" @@ -27503,14 +23290,14 @@ dependencies = [ "sp-trie 29.0.0", "thiserror", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] name = "sp-state-machine" -version = "0.40.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18084cb996c27d5d99a88750e0a8eb4af6870a40df97872a5923e6d293d95fb9" +checksum = "1eae0eac8034ba14437e772366336f579398a46d101de13dbb781ab1e35e67c5" dependencies = [ "hash-db", "log", @@ -27518,20 +23305,21 @@ dependencies = [ "parking_lot 0.12.3", "rand", "smallvec", - "sp-core 32.0.0", - "sp-externalities 0.28.0", + "sp-core 31.0.0", + "sp-externalities 0.27.0", "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 32.0.0", "thiserror", "tracing", - "trie-db", + "trie-db 0.28.0", ] [[package]] name = "sp-state-machine" -version = "0.41.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6f6ac196ea92c4d0613c071e1a050765dbfa30107a990224a4aba02c7dbcd063" +checksum = "18084cb996c27d5d99a88750e0a8eb4af6870a40df97872a5923e6d293d95fb9" dependencies = [ "hash-db", "log", @@ -27539,20 +23327,20 @@ dependencies = [ "parking_lot 0.12.3", "rand", "smallvec", - "sp-core 33.0.1", + "sp-core 32.0.0", "sp-externalities 0.28.0", "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 35.0.0", + "sp-trie 34.0.0", "thiserror", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] name = "sp-state-machine" -version = "0.43.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930104d6ae882626e8880d9b1578da9300655d337a3ffb45e130c608b6c89660" +checksum = "6f6ac196ea92c4d0613c071e1a050765dbfa30107a990224a4aba02c7dbcd063" dependencies = [ "hash-db", "log", @@ -27560,13 +23348,13 @@ dependencies = [ "parking_lot 0.12.3", "rand", "smallvec", - "sp-core 34.0.0", - "sp-externalities 0.29.0", + "sp-core 33.0.1", + "sp-externalities 0.28.0", "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 37.0.0", + "sp-trie 35.0.0", "thiserror", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] @@ -27592,31 +23380,6 @@ dependencies = [ "x25519-dalek", ] -[[package]] -name = "sp-statement-store" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c219bc34ef4d1f9835f3ed881f965643c32034fcc030eb33b759dadbc802c1c2" -dependencies = [ - "aes-gcm", - "curve25519-dalek 4.1.3", - "ed25519-dalek", - "hkdf", - "parity-scale-codec", - "rand", - "scale-info", - "sha2 0.10.8", - "sp-api 34.0.0", - "sp-application-crypto 38.0.0", - "sp-core 34.0.0", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.29.0", - "sp-runtime 39.0.2", - "sp-runtime-interface 28.0.0", - "thiserror", - "x25519-dalek", -] - [[package]] name = "sp-std" version = "8.0.0" @@ -27697,25 +23460,12 
@@ dependencies = [ [[package]] name = "sp-timestamp" -version = "26.0.0" -dependencies = [ - "async-trait", - "parity-scale-codec", - "sp-inherents 26.0.0", - "sp-runtime 31.0.1", - "thiserror", -] - -[[package]] -name = "sp-timestamp" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a1cb4df653d62ccc0dbce1db45d1c9443ec60247ee9576962d24da4c9c6f07" +version = "26.0.0" dependencies = [ "async-trait", "parity-scale-codec", - "sp-inherents 34.0.0", - "sp-runtime 39.0.2", + "sp-inherents", + "sp-runtime 31.0.1", "thiserror", ] @@ -27756,14 +23506,14 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "17.0.1" +version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf641a1d17268c8fcfdb8e0fa51a79c2d4222f4cfda5f3944dbdbc384dced8d5" +checksum = "90b3decf116db9f1dfaf1f1597096b043d0e12c952d3bcdc018c6d6b77deec7e" dependencies = [ "parity-scale-codec", "tracing", "tracing-core", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.2.25", ] [[package]] @@ -27774,16 +23524,6 @@ dependencies = [ "sp-runtime 31.0.1", ] -[[package]] -name = "sp-transaction-pool" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4bf251059485a7dd38fe4afeda8792983511cc47f342ff4695e2dcae6b5247" -dependencies = [ - "sp-api 34.0.0", - "sp-runtime 39.0.2", -] - [[package]] name = "sp-transaction-storage-proof" version = "26.0.0" @@ -27792,26 +23532,11 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents 26.0.0", + "sp-inherents", "sp-runtime 31.0.1", "sp-trie 29.0.0", ] -[[package]] -name = "sp-transaction-storage-proof" -version = "34.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c765c2e9817d95f13d42a9f2295c60723464669765c6e5acbacebd2f54932f67" -dependencies = [ - "async-trait", - "parity-scale-codec", - "scale-info", - "sp-core 34.0.0", - "sp-inherents 34.0.0", - "sp-runtime 
39.0.2", - "sp-trie 37.0.0", -] - [[package]] name = "sp-trie" version = "29.0.0" @@ -27833,16 +23558,16 @@ dependencies = [ "thiserror", "tracing", "trie-bench", - "trie-db", + "trie-db 0.29.1", "trie-root", "trie-standardmap", ] [[package]] name = "sp-trie" -version = "34.0.0" +version = "32.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87727eced997f14d0f79e3a5186a80e38a9de87f6e9dc0baea5ebf8b7f9d8b66" +checksum = "f1aa91ad26c62b93d73e65f9ce7ebd04459c4bad086599348846a81988d6faa4" dependencies = [ "ahash 0.8.11", "hash-db", @@ -27854,19 +23579,20 @@ dependencies = [ "rand", "scale-info", "schnellru", - "sp-core 32.0.0", - "sp-externalities 0.28.0", + "sp-core 31.0.0", + "sp-externalities 0.27.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror", "tracing", - "trie-db", + "trie-db 0.28.0", "trie-root", ] [[package]] name = "sp-trie" -version = "35.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61ab0c3e003f457203702e4753aa5fe9e762380543fada44650b1217e4aa5a5" +checksum = "87727eced997f14d0f79e3a5186a80e38a9de87f6e9dc0baea5ebf8b7f9d8b66" dependencies = [ "ahash 0.8.11", "hash-db", @@ -27878,19 +23604,19 @@ dependencies = [ "rand", "scale-info", "schnellru", - "sp-core 33.0.1", + "sp-core 32.0.0", "sp-externalities 0.28.0", "thiserror", "tracing", - "trie-db", + "trie-db 0.29.1", "trie-root", ] [[package]] name = "sp-trie" -version = "37.0.0" +version = "35.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6282aef9f4b6ecd95a67a45bcdb67a71f4a4155c09a53c10add4ffe823db18cd" +checksum = "a61ab0c3e003f457203702e4753aa5fe9e762380543fada44650b1217e4aa5a5" dependencies = [ "ahash 0.8.11", "hash-db", @@ -27902,11 +23628,11 @@ dependencies = [ "rand", "scale-info", "schnellru", - "sp-core 34.0.0", - "sp-externalities 0.29.0", + "sp-core 33.0.1", + "sp-externalities 0.28.0", "thiserror", "tracing", - "trie-db", + 
"trie-db 0.29.1", "trie-root", ] @@ -27944,30 +23670,12 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-version" -version = "37.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d521a405707b5be561367cd3d442ff67588993de24062ce3adefcf8437ee9fe1" -dependencies = [ - "impl-serde 0.4.0", - "parity-scale-codec", - "parity-wasm", - "scale-info", - "serde", - "sp-crypto-hashing-proc-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-version-proc-macro 14.0.0", - "thiserror", -] - [[package]] name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro-warning", + "proc-macro-warning 1.0.0", "proc-macro2 1.0.86", "quote 1.0.37", "sp-version 29.0.0", @@ -28026,9 +23734,9 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "21.0.1" +version = "21.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b066baa6d57951600b14ffe1243f54c47f9c23dd89c262e17ca00ae8dca58be9" +checksum = "3b04b919e150b4736d85089d49327eab65507deb1485eec929af69daa2278eb3" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -28051,6 +23759,22 @@ dependencies = [ "sp-debug-derive 14.0.0", ] +[[package]] +name = "sp-weights" +version = "30.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af6c661fe3066b29f9e1d258000f402ff5cc2529a9191972d214e5871d0ba87" +dependencies = [ + "bounded-collections", + "parity-scale-codec", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic 25.0.0", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "sp-weights" version = "31.0.0" @@ -28168,16 +23892,16 @@ dependencies = [ "clap_complete", "criterion", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", 
"kitchensink-runtime", "log", - "nix 0.29.0", + "nix 0.28.0", "node-primitives", "node-rpc", "node-testing", "parity-scale-codec", "platforms", - "polkadot-sdk 0.1.0", + "polkadot-sdk", "pretty_assertions", "rand", "regex", @@ -28186,7 +23910,7 @@ dependencies = [ "serde", "serde_json", "soketto 0.8.0", - "sp-keyring 31.0.0", + "sp-keyring", "staging-node-inspect", "substrate-cli-test-utils", "subxt-signer", @@ -28210,7 +23934,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-statement-store 10.0.0", + "sp-statement-store", "thiserror", ] @@ -28218,28 +23942,14 @@ dependencies = [ name = "staging-parachain-info" version = "0.7.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-primitives-core", + "frame-support", + "frame-system", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", ] -[[package]] -name = "staging-parachain-info" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d28266dfddbfff721d70ad2f873380845b569adfab32f257cf97d9cedd894b68" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-runtime 39.0.2", -] - [[package]] name = "staging-tracking-allocator" version = "2.0.0" @@ -28252,7 +23962,7 @@ dependencies = [ "bounded-collections", "derivative", "environmental", - "frame-support 28.0.0", + "frame-support", "hex", "hex-literal", "impl-trait-for-tuples", @@ -28264,27 +23974,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "xcm-procedural 7.0.0", -] - -[[package]] -name = "staging-xcm" -version = "14.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96bee7cd999e9cdf10f8db72342070d456e21e82a0f5962ff3b87edbd5f2b20e" -dependencies = [ - "array-bytes", - "bounded-collections", - "derivative", - "environmental", - "impl-trait-for-tuples", - "log", - 
"parity-scale-codec", - "scale-info", - "serde", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", - "xcm-procedural 10.1.0", + "xcm-procedural", ] [[package]] @@ -28292,20 +23982,20 @@ name = "staging-xcm-builder" version = "7.0.0" dependencies = [ "assert_matches", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-asset-conversion 10.0.0", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-salary 13.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-xcm 7.0.0", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "pallet-salary", + "pallet-transaction-payment", + "pallet-xcm", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", "polkadot-test-runtime", "primitive-types 0.13.1", "scale-info", @@ -28314,31 +24004,8 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "staging-xcm-builder" -version = "17.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3746adbbae27b1e6763f0cca622e15482ebcb94835a9e078c212dd7be896e35" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "impl-trait-for-tuples", - "log", - "pallet-asset-conversion 20.0.0", - "pallet-transaction-payment 38.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 14.0.0", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] @@ -28346,8 +24013,8 @@ name = "staging-xcm-executor" version = "7.0.0" dependencies = [ "environmental", - "frame-benchmarking 28.0.0", - "frame-support 28.0.0", + 
"frame-benchmarking", + "frame-support", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", @@ -28356,28 +24023,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm 7.0.0", - "tracing", -] - -[[package]] -name = "staging-xcm-executor" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79dd0c5332a5318e58f0300b20768b71cf9427c906f94a743c9dc7c3ee9e7fa9" -dependencies = [ - "environmental", - "frame-benchmarking 38.0.0", - "frame-support 38.0.0", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "sp-arithmetic 26.0.0", - "sp-core 34.0.0", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-weights 31.0.0", - "staging-xcm 14.2.0", + "staging-xcm", "tracing", ] @@ -28599,7 +24245,7 @@ version = "0.1.0" dependencies = [ "assert_cmd", "futures", - "nix 0.29.0", + "nix 0.28.0", "node-primitives", "regex", "sc-cli", @@ -28631,9 +24277,9 @@ dependencies = [ name = "substrate-frame-rpc-support" version = "29.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", - "jsonrpsee", + "frame-support", + "frame-system", + "jsonrpsee 0.24.3", "parity-scale-codec", "sc-rpc-api", "scale-info", @@ -28650,16 +24296,16 @@ version = "28.0.0" dependencies = [ "assert_matches", "docify", - "frame-system-rpc-runtime-api 26.0.0", + "frame-system-rpc-runtime-api", "futures", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "parity-scale-codec", "sc-rpc-api", "sc-transaction-pool", "sc-transaction-pool-api", "sp-api 26.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-blockchain", "sp-core 28.0.0", "sp-runtime 31.0.1", @@ -28688,34 +24334,34 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bp-header-chain 0.7.0", - "bp-messages 0.7.0", - "bp-parachains 0.7.0", - "bp-polkadot-core 0.7.0", - "bp-relayers 0.7.0", - "bp-runtime 0.7.0", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", 
"equivocation-detector", "finality-relay", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "futures", "hex", "log", "messages-relay", "num-traits", - "pallet-balances 28.0.0", - "pallet-bridge-grandpa 0.7.0", - "pallet-bridge-messages 0.7.0", - "pallet-bridge-parachains 0.7.0", - "pallet-grandpa 28.0.0", - "pallet-transaction-payment 28.0.0", + "pallet-balances", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-grandpa", + "pallet-transaction-payment", "parachains-relay", "parity-scale-codec", "rbtag", "relay-substrate-client", "relay-utils", "scale-info", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-trie 29.0.0", @@ -28729,7 +24375,7 @@ name = "substrate-rpc-client" version = "0.33.0" dependencies = [ "async-trait", - "jsonrpsee", + "jsonrpsee 0.24.3", "log", "sc-rpc-api", "serde", @@ -28750,7 +24396,7 @@ dependencies = [ "sp-core 32.0.0", "sp-io 35.0.0", "sp-runtime 36.0.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "thiserror", ] @@ -28758,7 +24404,7 @@ dependencies = [ name = "substrate-state-trie-migration-rpc" version = "27.0.0" dependencies = [ - "jsonrpsee", + "jsonrpsee 0.24.3", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -28768,7 +24414,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", - "trie-db", + "trie-db 0.29.1", ] [[package]] @@ -28790,7 +24436,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", @@ -28802,16 +24448,16 @@ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ "array-bytes", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", + "frame-executive", + "frame-metadata-hash-extension", 
+ "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", "futures", "log", - "pallet-babe 28.0.0", - "pallet-balances 28.0.0", - "pallet-timestamp 27.0.0", + "pallet-babe", + "pallet-balances", + "pallet-timestamp", "parity-scale-codec", "sc-block-builder", "sc-chain-spec", @@ -28823,30 +24469,30 @@ dependencies = [ "serde_json", "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder 26.0.0", + "sp-block-builder", "sp-consensus", - "sp-consensus-aura 0.32.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-grandpa 13.0.0", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", + "sp-session", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-trie 29.0.0", "sp-version 29.0.0", "substrate-test-runtime-client", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] @@ -28901,12 +24547,12 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 18.0.0", + "frame-metadata 16.0.0", "jobserver", "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.18.0", + "polkavm-linker 0.9.2", "sc-executor 0.32.0", "shlex", "sp-core 28.0.0", @@ -28921,27 +24567,6 @@ dependencies = [ "wasm-opt", ] -[[package]] -name = "substrate-wasm-builder" -version = "24.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf035ffe7335fb24053edfe4d0a5780250eda772082a1b80ae25835dd4c09265" -dependencies = [ - "build-helper", - "cargo_metadata", - "console", - "filetime", - "jobserver", - "parity-wasm", - "polkavm-linker 0.9.2", - "sp-maybe-compressed-blob 
11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "strum 0.26.3", - "tempfile", - "toml 0.8.12", - "walkdir", - "wasm-opt", -] - [[package]] name = "subtle" version = "1.0.0" @@ -28972,7 +24597,7 @@ dependencies = [ "log", "num-format", "rand", - "reqwest 0.12.9", + "reqwest 0.12.5", "scale-info", "semver 1.0.18", "serde", @@ -28988,49 +24613,50 @@ dependencies = [ [[package]] name = "subxt" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c53029d133e4e0cb7933f1fe06f2c68804b956de9bb8fa930ffca44e9e5e4230" +checksum = "a160cba1edbf3ec4fbbeaea3f1a185f70448116a6bccc8276bb39adb3b3053bd" dependencies = [ "async-trait", "derive-where", "either", - "finito", - "frame-metadata 17.0.0", + "frame-metadata 16.0.0", "futures", "hex", - "impl-serde 0.5.0", - "jsonrpsee", + "impl-serde 0.4.0", + "instant", + "jsonrpsee 0.22.5", "parity-scale-codec", - "polkadot-sdk 0.7.0", - "primitive-types 0.13.1", + "primitive-types 0.12.2", + "reconnecting-jsonrpsee-ws-client", "scale-bits", - "scale-decode 0.14.0", + "scale-decode", "scale-encode", "scale-info", "scale-value", "serde", "serde_json", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core", "subxt-lightclient", "subxt-macro", "subxt-metadata", "thiserror", - "tokio", "tokio-util", "tracing", "url", - "wasm-bindgen-futures", - "web-time", ] [[package]] name = "subxt-codegen" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cfcfb7d9589f3df0ac87c4988661cf3fb370761fcb19f2fd33104cc59daf22a" +checksum = "d703dca0905cc5272d7cc27a4ac5f37dcaae7671acc7fef0200057cc8c317786" dependencies = [ + "frame-metadata 16.0.0", "heck 0.5.0", + "hex", + "jsonrpsee 0.22.5", "parity-scale-codec", "proc-macro2 1.0.86", "quote 1.0.37", @@ -29039,48 +24665,49 @@ dependencies = [ "subxt-metadata", "syn 2.0.87", "thiserror", + "tokio", ] [[package]] name = 
"subxt-core" -version = "0.38.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea28114366780d23684bd55ab879cd04c9d4cbba3b727a3854a3eca6bf29a1a" +checksum = "3af3b36405538a36b424d229dc908d1396ceb0994c90825ce928709eac1a159a" dependencies = [ "base58", "blake2 0.10.6", "derive-where", - "frame-decode", - "frame-metadata 17.0.0", + "frame-metadata 16.0.0", "hashbrown 0.14.5", "hex", - "impl-serde 0.5.0", - "keccak-hash", + "impl-serde 0.4.0", "parity-scale-codec", - "polkadot-sdk 0.7.0", - "primitive-types 0.13.1", + "primitive-types 0.12.2", "scale-bits", - "scale-decode 0.14.0", + "scale-decode", "scale-encode", "scale-info", "scale-value", "serde", "serde_json", + "sp-core 31.0.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 34.0.0", "subxt-metadata", "tracing", ] [[package]] name = "subxt-lightclient" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534d4b725183a9fa09ce0e0f135674473297fdd97dee4d683f41117f365ae997" +checksum = "9d9406fbdb9548c110803cb8afa750f8b911d51eefdf95474b11319591d225d9" dependencies = [ "futures", "futures-util", "serde", "serde_json", - "smoldot-light 0.16.2", + "smoldot-light 0.14.0", "thiserror", "tokio", "tokio-stream", @@ -29089,74 +24716,56 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228db9a5c95a6d8dc6152b4d6cdcbabc4f60821dd3f482a4f8791e022b7caadb" +checksum = "1c195f803d70687e409aba9be6c87115b5da8952cd83c4d13f2e043239818fcd" dependencies = [ - "darling", + "darling 0.20.10", "parity-scale-codec", - "proc-macro-error2", + "proc-macro-error", "quote 1.0.37", "scale-typegen", "subxt-codegen", - "subxt-utils-fetchmetadata", "syn 2.0.87", ] [[package]] name = "subxt-metadata" -version = "0.38.0" +version = "0.37.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee13e6862eda035557d9a2871955306aff540d2b89c06e0a62a1136a700aed28" +checksum = "738be5890fdeff899bbffff4d9c0f244fe2a952fb861301b937e3aa40ebb55da" dependencies = [ - "frame-decode", - "frame-metadata 17.0.0", + "frame-metadata 16.0.0", "hashbrown 0.14.5", "parity-scale-codec", - "polkadot-sdk 0.7.0", "scale-info", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "subxt-signer" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7a336d6a1f86f126100a4a717be58352de4c8214300c4f7807f974494efdb9" +checksum = "f49888ae6ae90fe01b471193528eea5bd4ed52d8eecd2d13f4a2333b87388850" dependencies = [ - "base64 0.22.1", "bip32", "bip39", "cfg-if", - "crypto_secretbox", "hex", "hmac 0.12.1", "keccak-hash", "parity-scale-codec", "pbkdf2", - "polkadot-sdk 0.7.0", "regex", "schnorrkel 0.11.4", - "scrypt", - "secp256k1 0.30.0", - "secrecy 0.10.3", - "serde", - "serde_json", + "secp256k1", + "secrecy", "sha2 0.10.8", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core", "zeroize", ] -[[package]] -name = "subxt-utils-fetchmetadata" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3082b17a86e3c3fe45d858d94d68f6b5247caace193dad6201688f24db8ba9bb" -dependencies = [ - "hex", - "parity-scale-codec", - "thiserror", -] - [[package]] name = "sval" version = "2.6.1" @@ -29316,9 +24925,6 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" -dependencies = [ - "futures-core", -] [[package]] name = "synstructure" @@ -29411,7 +25017,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.0", "redox_syscall 0.4.1", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -29441,7 +25047,7 
@@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -29479,9 +25085,9 @@ version = "1.0.0" dependencies = [ "dlmalloc", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", + "polkadot-parachain-primitives", "sp-io 30.0.0", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", "tiny-keccak", ] @@ -29498,14 +25104,14 @@ dependencies = [ "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-service", "polkadot-test-service", "sc-cli", "sc-service", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "substrate-test-utils", "test-parachain-adder", "tokio", @@ -29516,7 +25122,7 @@ name = "test-parachain-halt" version = "1.0.0" dependencies = [ "rustversion", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", ] [[package]] @@ -29526,9 +25132,9 @@ dependencies = [ "dlmalloc", "log", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", + "polkadot-parachain-primitives", "sp-io 30.0.0", - "substrate-wasm-builder 17.0.0", + "substrate-wasm-builder", "tiny-keccak", ] @@ -29545,14 +25151,14 @@ dependencies = [ "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-service", "polkadot-test-service", "sc-cli", "sc-service", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "substrate-test-utils", "test-parachain-undying", "tokio", @@ -29573,8 +25179,8 @@ dependencies = [ name = "test-runtime-constants" version = "1.0.0" dependencies = [ - "frame-support 28.0.0", - "polkadot-primitives 7.0.0", + 
"frame-support", + "polkadot-primitives", "smallvec", "sp-runtime 31.0.1", ] @@ -29583,30 +25189,14 @@ dependencies = [ name = "testnet-parachains-constants" version = "1.0.0" dependencies = [ - "cumulus-primitives-core 0.7.0", - "frame-support 28.0.0", - "polkadot-core-primitives 7.0.0", - "rococo-runtime-constants 7.0.0", + "cumulus-primitives-core", + "frame-support", + "polkadot-core-primitives", + "rococo-runtime-constants", "smallvec", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", - "westend-runtime-constants 7.0.0", -] - -[[package]] -name = "testnet-parachains-constants" -version = "10.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bceae6f7c89d47daff6c7e05f712551a01379f61b07d494661941144878589" -dependencies = [ - "cumulus-primitives-core 0.16.0", - "frame-support 38.0.0", - "polkadot-core-primitives 15.0.0", - "rococo-runtime-constants 17.0.0", - "smallvec", - "sp-runtime 39.0.2", - "staging-xcm 14.2.0", - "westend-runtime-constants 17.0.0", + "staging-xcm", + "westend-runtime-constants", ] [[package]] @@ -29851,9 +25441,20 @@ dependencies = [ name = "tokio-rustls" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.7", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.21.7", + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] @@ -29863,7 +25464,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.18", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -29910,9 
+25511,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -29971,7 +25572,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -29984,7 +25585,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.2.3", "toml_datetime", "winnow 0.5.15", ] @@ -29995,7 +25596,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -30116,7 +25717,7 @@ name = "tracing-gum" version = "7.0.0" dependencies = [ "coarsetime", - "polkadot-primitives 7.0.0", + "polkadot-primitives", "tracing", "tracing-gum-proc-macro", ] @@ -30219,11 +25820,24 @@ dependencies = [ "keccak-hasher", "memory-db", "parity-scale-codec", - "trie-db", + "trie-db 0.29.1", "trie-root", "trie-standardmap", ] +[[package]] +name = "trie-db" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff28e0f815c2fea41ebddf148e008b077d2faddb026c9555b29696114d602642" +dependencies = [ + "hash-db", + "hashbrown 0.13.2", + "log", + "rustc-hex", + "smallvec", +] + [[package]] name = "trie-db" version = "0.29.1" @@ -30255,6 +25869,78 @@ dependencies = [ "keccak-hasher", ] +[[package]] +name = 
"trust-dns-proto" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner 0.5.1", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.3", + "ipnet", + "lazy_static", + "rand", + "smallvec", + "socket2 0.4.9", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "trust-dns-proto" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner 0.6.0", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "smallvec", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" +dependencies = [ + "cfg-if", + "futures-util", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.3", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", + "trust-dns-proto 0.23.2", +] + [[package]] name = "try-lock" version = "0.2.4" @@ -30464,7 +26150,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ - "asynchronous-codec 0.6.2", + "asynchronous-codec", "bytes", "futures-io", "futures-util", @@ -30502,7 +26188,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.18", + "rustls 0.23.14", "rustls-pki-types", "serde", "serde_json", @@ -30675,9 +26361,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" 
[[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", "once_cell", @@ -30688,9 +26374,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -30703,9 +26389,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -30715,9 +26401,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote 1.0.37", "wasm-bindgen-macro-support", @@ -30725,9 +26411,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -30738,9 +26424,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" 
-version = "0.2.95" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-bindgen-test" @@ -30860,7 +26546,7 @@ dependencies = [ "sp-runtime 37.0.0", "sp-state-machine 0.41.0", "sp-version 35.0.0", - "sp-wasm-interface 21.0.1", + "sp-wasm-interface 21.0.0", "substrate-runtime-proposal-hash", "thiserror", "wasm-loader", @@ -31197,23 +26883,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - [[package]] name = "webpki" version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.8", + "ring 0.17.7", "untrusted 0.9.0", ] @@ -31237,19 +26913,19 @@ name = "westend-emulated-chain" version = "0.0.0" dependencies = [ "emulated-integration-tests-common", - "pallet-staking 28.0.0", - "parachains-common 7.0.0", - "polkadot-primitives 7.0.0", + "pallet-staking", + "parachains-common", + "polkadot-primitives", "sc-consensus-grandpa", - "sp-authority-discovery 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm 7.0.0", + "staging-xcm", "westend-runtime", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] @@ -31257,77 +26933,77 @@ name = "westend-runtime" version = "7.0.0" dependencies = [ "approx", - "binary-merkle-tree 13.0.0", + "binary-merkle-tree", "bitvec", - "frame-benchmarking 
28.0.0", - "frame-election-provider-support 28.0.0", - "frame-executive 28.0.0", - "frame-metadata-hash-extension 0.1.0", + "frame-benchmarking", + "frame-election-provider-support", + "frame-executive", + "frame-metadata-hash-extension", "frame-remote-externalities", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-system-benchmarking 28.0.0", - "frame-system-rpc-runtime-api 26.0.0", - "frame-try-runtime 0.34.0", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", "log", - "pallet-asset-rate 7.0.0", - "pallet-authority-discovery 28.0.0", - "pallet-authorship 28.0.0", - "pallet-babe 28.0.0", - "pallet-bags-list 27.0.0", - "pallet-balances 28.0.0", - "pallet-beefy 28.0.0", - "pallet-beefy-mmr 28.0.0", - "pallet-collective 28.0.0", - "pallet-conviction-voting 28.0.0", - "pallet-delegated-staking 1.0.0", - "pallet-democracy 28.0.0", - "pallet-election-provider-multi-phase 27.0.0", - "pallet-election-provider-support-benchmarking 27.0.0", - "pallet-elections-phragmen 29.0.0", - "pallet-fast-unstake 27.0.0", - "pallet-grandpa 28.0.0", - "pallet-identity 29.0.0", - "pallet-indices 28.0.0", - "pallet-membership 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-migrations 1.0.0", - "pallet-mmr 27.0.0", - "pallet-multisig 28.0.0", - "pallet-nomination-pools 25.0.0", - "pallet-nomination-pools-benchmarking 26.0.0", - "pallet-nomination-pools-runtime-api 23.0.0", - "pallet-offences 27.0.0", - "pallet-offences-benchmarking 28.0.0", - "pallet-parameters 0.1.0", - "pallet-preimage 28.0.0", - "pallet-proxy 28.0.0", - "pallet-recovery 28.0.0", - "pallet-referenda 28.0.0", - "pallet-root-testing 4.0.0", - "pallet-scheduler 29.0.0", - "pallet-session 28.0.0", - "pallet-session-benchmarking 28.0.0", - "pallet-society 28.0.0", - "pallet-staking 28.0.0", - "pallet-staking-runtime-api 14.0.0", - "pallet-state-trie-migration 29.0.0", - "pallet-sudo 28.0.0", - "pallet-timestamp 27.0.0", - 
"pallet-transaction-payment 28.0.0", - "pallet-transaction-payment-rpc-runtime-api 28.0.0", - "pallet-treasury 27.0.0", - "pallet-utility 28.0.0", - "pallet-vesting 28.0.0", - "pallet-whitelist 27.0.0", - "pallet-xcm 7.0.0", - "pallet-xcm-benchmarks 7.0.0", - "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "pallet-asset-rate", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-collective", + "pallet-conviction-voting", + "pallet-delegated-staking", + "pallet-democracy", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-grandpa", + "pallet-identity", + "pallet-indices", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mmr", + "pallet-multisig", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-recovery", + "pallet-referenda", + "pallet-root-testing", + "pallet-scheduler", + "pallet-session", + "pallet-session-benchmarking", + "pallet-society", + "pallet-staking", + "pallet-staking-runtime-api", + "pallet-state-trie-migration", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", "scale-info", "serde", "serde_derive", @@ -31336,66 +27012,49 @@ dependencies = [ "sp-api 26.0.0", 
"sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery 26.0.0", - "sp-block-builder 26.0.0", - "sp-consensus-babe 0.32.0", - "sp-consensus-beefy 13.0.0", - "sp-consensus-grandpa 13.0.0", - "sp-core 28.0.0", - "sp-genesis-builder 0.8.0", - "sp-inherents 26.0.0", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", "sp-io 30.0.0", - "sp-keyring 31.0.0", - "sp-mmr-primitives 26.0.0", - "sp-npos-elections 26.0.0", - "sp-offchain 26.0.0", + "sp-keyring", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", "sp-runtime 31.0.1", - "sp-session 27.0.0", - "sp-staking 26.0.0", + "sp-session", + "sp-staking", "sp-storage 19.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool 26.0.0", + "sp-transaction-pool", "sp-version 29.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "substrate-wasm-builder 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", "tiny-keccak", "tokio", - "westend-runtime-constants 7.0.0", - "xcm-runtime-apis 0.1.0", + "westend-runtime-constants", + "xcm-runtime-apis", ] [[package]] name = "westend-runtime-constants" version = "7.0.0" dependencies = [ - "frame-support 28.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-common 7.0.0", + "frame-support", + "polkadot-primitives", + "polkadot-runtime-common", "smallvec", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", -] - -[[package]] -name = "westend-runtime-constants" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06861bf945aadac59f4be23b44c85573029520ea9bd3d6c9ab21c8b306e81cdc" -dependencies = [ - "frame-support 38.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-common 17.0.0", - "smallvec", - "sp-core 34.0.0", - 
"sp-runtime 39.0.2", - "sp-weights 31.0.0", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", + "staging-xcm", + "staging-xcm-builder", ] [[package]] @@ -31506,36 +27165,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-registry" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" -dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result", - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -31778,6 +27407,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "wyz" version = "0.5.1" @@ -31799,18 +27438,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs 0.5.2", + "data-encoding", + "der-parser 8.2.0", + "lazy_static", + "nom", + "oid-registry 0.6.1", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "x509-parser" version = "0.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs", + "asn1-rs 0.6.1", "data-encoding", - "der-parser", + "der-parser 9.0.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.7.0", "rusticata-macros", "thiserror", "time", @@ -31830,24 +27486,24 @@ name = "xcm-docs" version = "0.1.0" dependencies = [ "docify", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", "parity-scale-codec", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", - "polkadot-sdk-frame 0.1.0", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", "scale-info", "simple-mermaid 0.1.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", "test-log", - "xcm-simulator 7.0.0", + "xcm-simulator", ] [[package]] @@ -31855,23 +27511,23 @@ name = "xcm-emulator" version = "0.5.0" dependencies = [ "array-bytes", - "cumulus-pallet-parachain-system 0.7.0", - "cumulus-pallet-xcmp-queue 0.7.0", - "cumulus-primitives-core 0.7.0", - "cumulus-primitives-parachain-inherent 0.7.0", - "cumulus-test-relay-sproof-builder 0.7.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-test-relay-sproof-builder", + "frame-support", + "frame-system", "impl-trait-for-tuples", "log", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "parachains-common 7.0.0", + "pallet-balances", + "pallet-message-queue", + "parachains-common", "parity-scale-codec", "paste", - 
"polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", @@ -31879,33 +27535,30 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] name = "xcm-executor-integration-tests" version = "1.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", "futures", - "pallet-sudo 28.0.0", - "pallet-transaction-payment 28.0.0", - "pallet-xcm 7.0.0", + "pallet-transaction-payment", + "pallet-xcm", "parity-scale-codec", - "polkadot-runtime-parachains 7.0.0", "polkadot-test-client", "polkadot-test-runtime", "polkadot-test-service", "sp-consensus", "sp-core 28.0.0", - "sp-keyring 31.0.0", + "sp-keyring", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-executor 7.0.0", + "staging-xcm", + "staging-xcm-executor", ] [[package]] @@ -31913,133 +27566,82 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "frame-support 28.0.0", "proc-macro2 1.0.86", "quote 1.0.37", - "staging-xcm 7.0.0", + "staging-xcm", "syn 2.0.87", "trybuild", ] -[[package]] -name = "xcm-procedural" -version = "10.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fb4f14094d65c500a59bcf540cf42b99ee82c706edd6226a92e769ad60563e" -dependencies = [ - "Inflector", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", -] - [[package]] name = "xcm-runtime-apis" version = "0.1.0" dependencies = [ - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-executive", + "frame-support", + "frame-system", "hex-literal", "log", - "pallet-assets 29.1.0", - "pallet-balances 28.0.0", - "pallet-xcm 
7.0.0", + "pallet-assets", + "pallet-balances", + "pallet-xcm", "parity-scale-codec", "scale-info", "sp-api 26.0.0", "sp-io 30.0.0", "sp-tracing 16.0.0", "sp-weights 27.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "xcm-runtime-apis" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d4473a5d157e4d437d9ebcb1b99f9693a64983877ee57d97005f0167869935" -dependencies = [ - "frame-support 38.0.0", - "parity-scale-codec", - "scale-info", - "sp-api 34.0.0", - "sp-weights 31.0.0", - "staging-xcm 14.2.0", - "staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "xcm-simulator" version = "7.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "parity-scale-codec", "paste", - "polkadot-core-primitives 7.0.0", - "polkadot-parachain-primitives 6.0.0", - "polkadot-primitives 7.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", -] - -[[package]] -name = "xcm-simulator" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058e21bfc3e1180bbd83cad3690d0e63f34f43ab309e338afe988160aa776fcf" -dependencies = [ - "frame-support 38.0.0", - "frame-system 38.0.0", - "parity-scale-codec", - "paste", - "polkadot-core-primitives 15.0.0", - "polkadot-parachain-primitives 14.0.0", - "polkadot-primitives 16.0.0", - "polkadot-runtime-parachains 17.0.1", - "scale-info", - "sp-io 38.0.0", - "sp-runtime 39.0.2", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "staging-xcm 14.2.0", - "staging-xcm-builder 17.0.1", - 
"staging-xcm-executor 17.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", ] [[package]] name = "xcm-simulator-example" version = "7.0.0" dependencies = [ - "frame-support 28.0.0", - "frame-system 28.0.0", + "frame-support", + "frame-system", "log", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-uniques 28.0.0", - "pallet-xcm 7.0.0", + "pallet-balances", + "pallet-message-queue", + "pallet-uniques", + "pallet-xcm", "parity-scale-codec", - "polkadot-core-primitives 7.0.0", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "xcm-simulator 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] [[package]] @@ -32047,27 +27649,27 @@ name = "xcm-simulator-fuzzer" version = "1.0.0" dependencies = [ "arbitrary", - "frame-executive 28.0.0", - "frame-support 28.0.0", - "frame-system 28.0.0", - "frame-try-runtime 0.34.0", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", "honggfuzz", - "pallet-balances 28.0.0", - "pallet-message-queue 31.0.0", - "pallet-xcm 7.0.0", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", "parity-scale-codec", - "polkadot-core-primitives 7.0.0", - "polkadot-parachain-primitives 6.0.0", - "polkadot-runtime-parachains 7.0.0", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm 7.0.0", - "staging-xcm-builder 7.0.0", - "staging-xcm-executor 7.0.0", - "xcm-simulator 7.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", ] 
[[package]] @@ -32183,7 +27785,7 @@ version = "1.0.0" dependencies = [ "futures-util", "parity-scale-codec", - "reqwest 0.12.9", + "reqwest 0.11.20", "serde", "serde_json", "thiserror", @@ -32195,15 +27797,15 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d716b3ff8112d98ced15f53b0c72454f8cde533fe2b68bb04379228961efbd80" +checksum = "ebbfc98adb25076777967f7aad078e74029e129b102eb0812c425432f8c2be7b" dependencies = [ "anyhow", "lazy_static", "multiaddr 0.18.1", "regex", - "reqwest 0.11.27", + "reqwest 0.11.20", "serde", "serde_json", "thiserror", @@ -32215,21 +27817,21 @@ dependencies = [ [[package]] name = "zombienet-orchestrator" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4098a7d33b729b59e32c41a87aa4d484bd1b8771a059bbd4edfb4d430b3b2d74" +checksum = "5b17f4d1d05b3aedf02818eb0f4d5a76664da0e07bb2f7e7d02613e0ef0f316a" dependencies = [ "anyhow", "async-trait", "futures", "glob-match", "hex", - "libp2p 0.52.4", + "libp2p", "libsecp256k1", "multiaddr 0.18.1", "rand", "regex", - "reqwest 0.11.27", + "reqwest 0.11.20", "serde", "serde_json", "sha2 0.10.8", @@ -32248,9 +27850,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961e30be45b34f6ebeabf29ee2f47b0cd191ea62e40c064752572207509a6f5c" +checksum = "7203390ab88919240da3a3eb06b625b6e300e94f98e04ba5141e9138dc663b7d" dependencies = [ "pest", "pest_derive", @@ -32259,9 +27861,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0f7f01780b7c99a6c40539d195d979f234305f32808d547438b50829d44262" +checksum = 
"ee02ee957ec39b698798fa6dc2a0d5ba4524198471c37d57755e9685b67fb50c" dependencies = [ "anyhow", "async-trait", @@ -32272,7 +27874,7 @@ dependencies = [ "kube", "nix 0.27.1", "regex", - "reqwest 0.11.27", + "reqwest 0.11.20", "serde", "serde_json", "serde_yaml", @@ -32290,15 +27892,14 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a3c5f2d657235b3ab7dc384677e63cde21983029e99106766ecd49e9f8d7f3" +checksum = "f594e67922182277a3da0926f21b693eb5a0c38b32ca7fd6ef16167809fe5064" dependencies = [ "async-trait", "futures", "lazy_static", "subxt", - "subxt-signer", "tokio", "zombienet-configuration", "zombienet-orchestrator", @@ -32308,9 +27909,9 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.19" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "296f887ea88e07edd771f8e1d0dec5297a58b422f4b884a6292a21ebe03277cb" +checksum = "93d3144537df7c8939bbb355cc5245a6dc0078446a6cdaf9272268bd1043c788" dependencies = [ "anyhow", "async-trait", @@ -32318,7 +27919,7 @@ dependencies = [ "nix 0.27.1", "rand", "regex", - "reqwest 0.11.27", + "reqwest 0.11.20", "thiserror", "tokio", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 0f7c432e8135..56b8ebd2f9ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -295,7 +295,6 @@ members = [ "substrate/client/rpc-api", "substrate/client/rpc-servers", "substrate/client/rpc-spec-v2", - "substrate/client/runtime-utilities", "substrate/client/service", "substrate/client/service/test", "substrate/client/state-db", @@ -600,7 +599,8 @@ zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_0 Inflector = { version = "0.11.4" } aes-gcm = { version = "0.10" } ahash = { version = "0.8.2" } -alloy-core = { version = "0.8.15", default-features = false } +alloy-primitives = { version = "0.4.2", default-features = false } +alloy-sol-types = { version = 
"0.4.2", default-features = false } always-assert = { version = "0.1" } anyhow = { version = "1.0.81", default-features = false } approx = { version = "0.5.1" } @@ -642,7 +642,7 @@ bitvec = { version = "1.0.1", default-features = false } blake2 = { version = "0.10.4", default-features = false } blake2b_simd = { version = "1.0.2", default-features = false } blake3 = { version = "1.5" } -bounded-collections = { version = "0.2.2", default-features = false } +bounded-collections = { version = "0.2.0", default-features = false } bounded-vec = { version = "0.7" } bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "bridges/chains/chain-asset-hub-westend", default-features = false } @@ -761,7 +761,7 @@ enumn = { version = "0.1.13" } env_logger = { version = "0.11.2" } environmental = { version = "1.1.4", default-features = false } equivocation-detector = { path = "bridges/relays/equivocation" } -ethabi = { version = "2.0.0", default-features = false, package = "ethabi-decode" } +ethabi = { version = "1.0.0", default-features = false, package = "ethabi-decode" } ethbloom = { version = "0.14.1", default-features = false } ethereum-types = { version = "0.15.1", default-features = false } exit-future = { version = "0.2.0" } @@ -786,7 +786,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "substrate/frame/executive", default-features = false } -frame-metadata = { version = "18.0.0", default-features = false } +frame-metadata = { version = "16.0.0", default-features = false } frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } 
frame-support = { path = "substrate/frame/support", default-features = false } frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } @@ -848,20 +848,20 @@ kvdb-shared-tests = { version = "0.11.0" } landlock = { version = "0.3.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } -libp2p = { version = "0.54.1" } +libp2p = { version = "0.52.4" } libp2p-identity = { version = "0.2.9" } libsecp256k1 = { version = "0.7.0", default-features = false } linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.8.4", features = ["websocket"] } +litep2p = { version = "0.8.0", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { version = "0.9.3" } memory-db = { version = "0.32.0", default-features = false } -merkleized-metadata = { version = "0.2.0" } +merkleized-metadata = { version = "0.1.0" } merlin = { version = "3.0", default-features = false } messages-relay = { path = "bridges/relays/messages" } metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } @@ -878,7 +878,7 @@ multihash = { version = "0.19.1", default-features = false } multihash-codetable = { version = "0.1.1" } multistream-select = { version = "0.13.0" } names = { version = "0.14.0", default-features = false } -nix = { version = "0.29.0" } +nix = { version = "0.28.0" } node-cli = { path = "substrate/bin/node/cli", package = "staging-node-cli" } node-inspect = { path = "substrate/bin/node/inspect", default-features = false, package = "staging-node-inspect" } node-primitives = { path = "substrate/bin/node/primitives", default-features = false } @@ -1098,9 +1098,9 @@ polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } polkadot-test-client = 
{ path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } -polkavm = { version = "0.18.0", default-features = false } -polkavm-derive = "0.18.0" -polkavm-linker = "0.18.0" +polkavm = { version = "0.9.3", default-features = false } +polkavm-derive = "0.9.1" +polkavm-linker = "0.9.2" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.13.1", default-features = false, features = [ @@ -1132,7 +1132,7 @@ regex = { version = "1.10.2" } relay-substrate-client = { path = "bridges/relays/client-substrate" } relay-utils = { path = "bridges/relays/utils" } remote-externalities = { path = "substrate/utils/frame/remote-externalities", default-features = false, package = "frame-remote-externalities" } -reqwest = { version = "0.12.9", default-features = false } +reqwest = { version = "0.11", default-features = false } rlp = { version = "0.6.1", default-features = false } rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/rococo" } rococo-parachain-runtime = { path = "cumulus/parachains/runtimes/testing/rococo-parachain" } @@ -1145,7 +1145,7 @@ rstest = { version = "0.18.2" } rustc-hash = { version = "1.1.0" } rustc-hex = { version = "2.1.0", default-features = false } rustix = { version = "0.36.7", default-features = false } -rustls = { version = "0.23.18", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } rustversion = { version = "1.0.17" } rusty-fork = { version = "0.3.0", default-features = false } safe-mix = { version = "1.0", default-features = false } @@ -1193,7 +1193,6 @@ sc-rpc-api = { path = "substrate/client/rpc-api", default-features = false } sc-rpc-server = { path = "substrate/client/rpc-servers", default-features = false } 
sc-rpc-spec-v2 = { path = "substrate/client/rpc-spec-v2", default-features = false } sc-runtime-test = { path = "substrate/client/executor/runtime-test" } -sc-runtime-utilities = { path = "substrate/client/runtime-utilities", default-features = true } sc-service = { path = "substrate/client/service", default-features = false } sc-service-test = { path = "substrate/client/service/test" } sc-state-db = { path = "substrate/client/state-db", default-features = false } @@ -1207,7 +1206,7 @@ sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.6", default-features = false } +scale-info = { version = "2.11.1", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } @@ -1326,9 +1325,8 @@ substrate-test-runtime-client = { path = "substrate/test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } -subxt = { version = "0.38", default-features = false } -subxt-metadata = { version = "0.38.0", default-features = false } -subxt-signer = { version = "0.38" } +subxt = { version = "0.37", default-features = false } +subxt-signer = { version = "0.37" } syn = { version = "2.0.87" } sysinfo = { version = "0.30" } tar = { version = "0.4" } @@ -1398,7 +1396,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = 
false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } -zombienet-sdk = { version = "0.2.19" } +zombienet-sdk = { version = "0.2.13" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/README.md b/README.md index 24352cc28a1a..6c0dfbb2e7e4 100644 --- a/README.md +++ b/README.md @@ -40,9 +40,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec ![Current Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-latest.svg)  ![Next Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-next.svg) -The Polkadot SDK is released every three months as a `stableYYMM` release. They are supported for +The Polkadot SDK is released every three months as a `stableYYMMDD` release. They are supported for one year with patches. See the next upcoming versions in the [Release -Registry](https://github.com/paritytech/release-registry/) and more docs in [RELEASE.md](./docs/RELEASE.md). +Registry](https://github.com/paritytech/release-registry/). You can use [`psvm`](https://github.com/paritytech/psvm) to update all dependencies to a specific version without needing to manually select the correct version for each crate. 
diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 49cd086fd3eb..37b56140c289 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -99,7 +99,6 @@ runtime-benchmarks = [ "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-trie", - "xcm/runtime-benchmarks", ] integrity-test = ["static_assertions"] test-helpers = ["bp-runtime/test-helpers", "sp-trie"] diff --git a/bridges/bin/runtime-common/src/extensions.rs b/bridges/bin/runtime-common/src/extensions.rs index 44e6b40b7e0c..19d1554c668b 100644 --- a/bridges/bin/runtime-common/src/extensions.rs +++ b/bridges/bin/runtime-common/src/extensions.rs @@ -299,7 +299,6 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl codec::Encode, - _source: sp_runtime::transaction_validity::TransactionSource, ) -> Result< ( sp_runtime::transaction_validity::ValidTransaction, @@ -391,9 +390,7 @@ mod tests { parameter_types, AsSystemOriginSigner, AsTransactionAuthorizedOrigin, ConstU64, DispatchTransaction, Header as _, TransactionExtension, }, - transaction_validity::{ - InvalidTransaction, TransactionSource::External, TransactionValidity, ValidTransaction, - }, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -613,9 +610,7 @@ mod tests { 42u64.into(), &MockCall { data: 1 }, &(), - 0, - External, - 0, + 0 ), InvalidTransaction::Custom(1) ); @@ -624,8 +619,7 @@ mod tests { 42u64.into(), &MockCall { data: 1 }, &(), - 0, - 0, + 0 ), InvalidTransaction::Custom(1) ); @@ -635,9 +629,7 @@ mod tests { 42u64.into(), &MockCall { data: 2 }, &(), - 0, - External, - 0, + 0 ), InvalidTransaction::Custom(2) ); @@ -646,22 +638,21 @@ mod tests { 42u64.into(), &MockCall { data: 2 }, &(), - 0, - 0, + 0 ), InvalidTransaction::Custom(2) ); assert_eq!( BridgeRejectObsoleteHeadersAndMessages - 
.validate_only(42u64.into(), &MockCall { data: 3 }, &(), 0, External, 0) + .validate_only(42u64.into(), &MockCall { data: 3 }, &(), 0) .unwrap() .0, ValidTransaction { priority: 3, ..Default::default() }, ); assert_eq!( BridgeRejectObsoleteHeadersAndMessages - .validate_and_prepare(42u64.into(), &MockCall { data: 3 }, &(), 0, 0) + .validate_and_prepare(42u64.into(), &MockCall { data: 3 }, &(), 0) .unwrap() .0 .unwrap(), diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index 535f1a26e5e8..2ff6c4c9165a 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -89,11 +89,13 @@ macro_rules! assert_bridge_messages_pallet_types( /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, /// and `assert_bridge_messages_pallet_types`. It may be used -/// at the chain that is implementing standard messages bridge with messages pallets deployed. +/// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA +/// and messages pallets deployed). #[macro_export] macro_rules! assert_complete_bridge_types( ( runtime: $r:path, + with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, this_chain: $this:path, bridged_chain: $bridged:path, @@ -184,55 +186,34 @@ where ); } -/// Parameters for asserting bridge GRANDPA pallet names. +/// Parameters for asserting bridge pallet names. #[derive(Debug)] -struct AssertBridgeGrandpaPalletNames<'a> { +pub struct AssertBridgePalletNames<'a> { /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, + /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged + /// chain. 
+ pub with_bridged_chain_messages_pallet_name: &'a str, } /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. -fn assert_bridge_grandpa_pallet_names(params: AssertBridgeGrandpaPalletNames) +fn assert_bridge_pallet_names(params: AssertBridgePalletNames) where - R: pallet_bridge_grandpa::Config, + R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, GI: 'static, + MI: 'static, { // check that the bridge GRANDPA pallet has required name assert_eq!( - pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key( - params.with_bridged_chain_grandpa_pallet_name, - "PalletOwner", - ) - .0, - ); - assert_eq!( - pallet_bridge_grandpa::PalletOperatingMode::::storage_value_final_key().to_vec(), + pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), bp_runtime::storage_value_key( params.with_bridged_chain_grandpa_pallet_name, - "PalletOperatingMode", - ) - .0, + "PalletOwner", + ).0, ); -} -/// Parameters for asserting bridge messages pallet names. -#[derive(Debug)] -struct AssertBridgeMessagesPalletNames<'a> { - /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_messages_pallet_name: &'a str, -} - -/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants -/// from chain primitives crates. 
-fn assert_bridge_messages_pallet_names(params: AssertBridgeMessagesPalletNames) -where - R: pallet_bridge_messages::Config, - MI: 'static, -{ // check that the bridge messages pallet has required name assert_eq!( pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), @@ -242,14 +223,6 @@ where ) .0, ); - assert_eq!( - pallet_bridge_messages::PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key( - params.with_bridged_chain_messages_pallet_name, - "PalletOperatingMode", - ) - .0, - ); } /// Parameters for asserting complete standard messages bridge. @@ -273,11 +246,9 @@ pub fn assert_complete_with_relay_chain_bridge_constants( assert_chain_constants::(params.this_chain_constants); assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_grandpa_pallet_names::(AssertBridgeGrandpaPalletNames { + assert_bridge_pallet_names::(AssertBridgePalletNames { with_bridged_chain_grandpa_pallet_name: >::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME, - }); - assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); @@ -285,43 +256,21 @@ pub fn assert_complete_with_relay_chain_bridge_constants( /// All bridge-related constants tests for the complete standard parachain messages bridge /// (i.e. with bridge GRANDPA, parachains and messages pallets deployed). 
-pub fn assert_complete_with_parachain_bridge_constants( +pub fn assert_complete_with_parachain_bridge_constants( params: AssertCompleteBridgeConstants, ) where R: frame_system::Config - + pallet_bridge_parachains::Config + + pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, - >::BridgedRelayChain: ChainWithGrandpa, - PI: 'static, - MI: 'static, -{ - assert_chain_constants::(params.this_chain_constants); - assert_bridge_grandpa_pallet_constants::(); - assert_bridge_messages_pallet_constants::(); - assert_bridge_grandpa_pallet_names::( - AssertBridgeGrandpaPalletNames { - with_bridged_chain_grandpa_pallet_name: - <>::BridgedRelayChain>::WITH_CHAIN_GRANDPA_PALLET_NAME, - }, - ); - assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { - with_bridged_chain_messages_pallet_name: - >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - }); -} - -/// All bridge-related constants tests for the standalone messages bridge deployment (only with -/// messages pallets deployed). 
-pub fn assert_standalone_messages_bridge_constants(params: AssertCompleteBridgeConstants) -where - R: frame_system::Config + pallet_bridge_messages::Config, + GI: 'static, MI: 'static, + RelayChain: ChainWithGrandpa, { assert_chain_constants::(params.this_chain_constants); + assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { + assert_bridge_pallet_names::(AssertBridgePalletNames { + with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME, with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 88037d9deff5..6cf04b452da7 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -196,7 +196,6 @@ impl pallet_bridge_messages::Config for TestRuntime { type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), - (), ConstU64<100_000>, >; type OnMessagesDelivered = (); diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 4eb93ab52bc9..363a869048aa 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -19,14 +19,10 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } -sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } -# Polkadot dependencies -xcm = { workspace = true } - [features] default = ["std"] std = [ @@ -34,6 +30,4 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", - "sp-core/std", - "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs index 4ff7b391acd0..de2e9ae856d1 
100644 --- a/bridges/chains/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-asset-hub-rococo/src/lib.rs @@ -18,13 +18,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; - use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; -use xcm::latest::prelude::*; /// `AssetHubRococo` Runtime `Call` enum. /// @@ -47,27 +44,5 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } -/// Builds an (un)congestion XCM program with the `report_bridge_status` call for -/// `ToWestendXcmRouter`. -pub fn build_congestion_message( - bridge_id: sp_core::H256, - is_congested: bool, -) -> alloc::vec::Vec> { - alloc::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), - call: Call::ToWestendXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { - bridge_id, - is_congested, - }) - .encode() - .into(), - }, - ExpectTransactStatus(MaybeErrorCode::Success), - ] -} - /// Identifier of AssetHubRococo in the Rococo relay chain. 
pub const ASSET_HUB_ROCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 22071399f4d1..430d9b6116cf 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -19,14 +19,10 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } -sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } -# Polkadot dependencies -xcm = { workspace = true } - [features] default = ["std"] std = [ @@ -34,6 +30,4 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", - "sp-core/std", - "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs index 9d245e08f7cc..9de1c8809894 100644 --- a/bridges/chains/chain-asset-hub-westend/src/lib.rs +++ b/bridges/chains/chain-asset-hub-westend/src/lib.rs @@ -18,13 +18,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; - use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; -use xcm::latest::prelude::*; /// `AssetHubWestend` Runtime `Call` enum. /// @@ -47,27 +44,5 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } -/// Builds an (un)congestion XCM program with the `report_bridge_status` call for -/// `ToRococoXcmRouter`. 
-pub fn build_congestion_message( - bridge_id: sp_core::H256, - is_congested: bool, -) -> alloc::vec::Vec> { - alloc::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), - call: Call::ToRococoXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { - bridge_id, - is_congested, - }) - .encode() - .into(), - }, - ExpectTransactStatus(MaybeErrorCode::Success), - ] -} - /// Identifier of AssetHubWestend in the Westend relay chain. pub const ASSET_HUB_WESTEND_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index b9eb1d2d69c1..99ba721991ee 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -16,14 +16,14 @@ workspace = true [dependencies] # Bridge Dependencies -bp-messages = { workspace = true } bp-polkadot-core = { workspace = true } +bp-messages = { workspace = true } bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { workspace = true } frame-system = { workspace = true } +frame-support = { workspace = true } sp-api = { workspace = true } sp-std = { workspace = true } diff --git a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 136832d0199d..39f7b44daa55 100644 --- a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -17,8 +17,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-messages = { workspace = true } bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index 04ce144b7906..3b0ac96e7cd3 100644 --- 
a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -18,8 +18,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-messages = { workspace = true } bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 08a704add2b7..23fbd9a2742f 100644 --- a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -18,8 +18,8 @@ codec = { features = ["derive"], workspace = true } # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-messages = { workspace = true } bp-runtime = { workspace = true } +bp-messages = { workspace = true } bp-xcm-bridge-hub = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml index 35932371d0a9..61357e6aa6c8 100644 --- a/bridges/chains/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -18,8 +18,8 @@ codec = { features = ["derive"], workspace = true } # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-messages = { workspace = true } bp-runtime = { workspace = true } +bp-messages = { workspace = true } bp-xcm-bridge-hub = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index 070bc7b0ba3d..c5c18beb2cad 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -225,4 +225,4 @@ impl ChainWithMessages for PolkadotBulletin { } decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); -decl_bridge_messages_runtime_apis!(polkadot_bulletin, 
bp_messages::LegacyLaneId); +decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::HashedLaneId); diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index adbf79e28b5a..cffc62d29082 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -31,13 +31,13 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } [dev-dependencies] -bp-test-utils = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } mmr-lib = { workspace = true } pallet-beefy-mmr = { workspace = true, default-features = true } pallet-mmr = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index fdca48ac6f07..6d1419ae5b03 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -19,8 +19,8 @@ scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-header-chain = { workspace = true } bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # Substrate Dependencies diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 6248c9e65e16..9df318587e38 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -33,8 +33,8 @@ bp-runtime = { features = ["test-helpers"], workspace = true } bp-test-utils = { workspace = true } pallet-balances = { workspace = true } pallet-bridge-grandpa = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/modules/relayers/Cargo.toml 
b/bridges/modules/relayers/Cargo.toml index 97ed61a9004e..04e7b52ed86c 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -34,15 +34,15 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } [dev-dependencies] +bp-runtime = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } bp-parachains = { workspace = true } bp-polkadot-core = { workspace = true } -bp-runtime = { workspace = true } bp-test-utils = { workspace = true } -pallet-balances = { workspace = true, default-features = true } pallet-utility = { workspace = true } sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs index d562ed9bcd0e..710533c223a0 100644 --- a/bridges/modules/relayers/src/extension/mod.rs +++ b/bridges/modules/relayers/src/extension/mod.rs @@ -33,7 +33,6 @@ use bp_runtime::{Chain, RangeInclusiveExt, StaticStrProvider}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, - pallet_prelude::TransactionSource, weights::Weight, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -129,7 +128,7 @@ pub struct BridgeRelayersTransactionExtension( impl BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -250,7 +249,7 @@ where // let's also replace the weight of slashing relayer with the weight of rewarding relayer if call_info.is_receive_messages_proof_call() { post_info_weight = post_info_weight.saturating_sub( - >::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), + ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), ); } @@ 
-278,7 +277,7 @@ impl TransactionExtension for BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -305,7 +304,6 @@ where _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { // Prepare relevant data for `prepare` let parsed_call = match C::parse_and_check_for_obsolete_call(call)? { @@ -326,9 +324,7 @@ where }; // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active( - &data.relayer, - ) { + if !RelayersPallet::::is_registration_active(&data.relayer) { return Ok((Default::default(), Some(data), origin)) } @@ -384,11 +380,7 @@ where match call_result { RelayerAccountAction::None => (), RelayerAccountAction::Reward(relayer, reward_account, reward) => { - RelayersPallet::::register_relayer_reward( - reward_account, - &relayer, - reward, - ); + RelayersPallet::::register_relayer_reward(reward_account, &relayer, reward); log::trace!( target: LOG_TARGET, @@ -400,7 +392,7 @@ where ); }, RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister( + RelayersPallet::::slash_and_deregister( &relayer, ExplicitOrAccountParams::Params(slash_account), ), @@ -471,9 +463,7 @@ mod tests { use pallet_utility::Call as UtilityCall; use sp_runtime::{ traits::{ConstU64, DispatchTransaction, Header as HeaderT}, - transaction_validity::{ - InvalidTransaction, TransactionSource::External, TransactionValidity, ValidTransaction, - }, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -1086,8 +1076,6 @@ mod tests { &call, &DispatchInfo::default(), 0, - External, - 0, ) .map(|t| t.0) } @@ -1100,8 +1088,6 @@ mod tests { &call, &DispatchInfo::default(), 0, - External, - 0, ) .map(|t| t.0) } @@ -1114,8 +1100,6 @@ mod tests { &call, 
&DispatchInfo::default(), 0, - External, - 0, ) .map(|t| t.0) } @@ -1141,7 +1125,6 @@ mod tests { &call, &DispatchInfo::default(), 0, - 0, ) .map(|(pre, _)| pre) } @@ -1159,7 +1142,6 @@ mod tests { &call, &DispatchInfo::default(), 0, - 0, ) .map(|(pre, _)| pre) } @@ -1177,7 +1159,6 @@ mod tests { &call, &DispatchInfo::default(), 0, - 0, ) .map(|(pre, _)| pre) } diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index d1c71b6d3051..f06c2e16ac24 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -22,9 +22,8 @@ use bp_relayers::{ ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider, - StakeAndSlash, + RewardsAccountParams, StakeAndSlash, }; -pub use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::StorageDoubleMapKeyProvider; use frame_support::fail; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; @@ -32,7 +31,7 @@ use sp_runtime::{traits::CheckedSub, Saturating}; use sp_std::marker::PhantomData; pub use pallet::*; -pub use payment_adapter::{DeliveryConfirmationPaymentsAdapter, PayRewardFromAccount}; +pub use payment_adapter::DeliveryConfirmationPaymentsAdapter; pub use stake_adapter::StakeAndSlashNamed; pub use weights::WeightInfo; pub use weights_ext::WeightInfoExt; diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index 7dc213249379..d186e968e648 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -171,14 +171,14 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< frame_support::construct_runtime! 
{ pub enum TestRuntime { - System: frame_system, + System: frame_system::{Pallet, Call, Config, Storage, Event}, Utility: pallet_utility, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - BridgeRelayers: pallet_bridge_relayers, - BridgeGrandpa: pallet_bridge_grandpa, - BridgeParachains: pallet_bridge_parachains, - BridgeMessages: pallet_bridge_messages, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, + BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event}, + BridgeGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Event}, + BridgeParachains: pallet_bridge_parachains::{Pallet, Call, Storage, Event}, + BridgeMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, } } @@ -267,7 +267,6 @@ impl pallet_bridge_messages::Config for TestRuntime { type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), - (), ConstU64<100_000>, >; type OnMessagesDelivered = (); diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs index 5af0d8f9dfbf..5383cba5ecbd 100644 --- a/bridges/modules/relayers/src/payment_adapter.rs +++ b/bridges/modules/relayers/src/payment_adapter.rs @@ -22,7 +22,6 @@ use bp_messages::{ source_chain::{DeliveryConfirmationPayments, RelayersRewards}, MessageNonce, }; -pub use bp_relayers::PayRewardFromAccount; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::Chain; use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; @@ -32,16 +31,15 @@ use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeIn /// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism /// for the messages pallet. 
-pub struct DeliveryConfirmationPaymentsAdapter( - PhantomData<(T, MI, RI, DeliveryReward)>, +pub struct DeliveryConfirmationPaymentsAdapter( + PhantomData<(T, MI, DeliveryReward)>, ); -impl DeliveryConfirmationPayments> - for DeliveryConfirmationPaymentsAdapter +impl DeliveryConfirmationPayments> + for DeliveryConfirmationPaymentsAdapter where - T: Config + pallet_bridge_messages::Config>::LaneId>, + T: Config + pallet_bridge_messages::Config::LaneId>, MI: 'static, - RI: 'static, DeliveryReward: Get, { type Error = &'static str; @@ -56,7 +54,7 @@ where bp_messages::calc_relayers_rewards::(messages_relayers, received_range); let rewarded_relayers = relayers_rewards.len(); - register_relayers_rewards::( + register_relayers_rewards::( confirmation_relayer, relayers_rewards, RewardsAccountParams::new( @@ -72,7 +70,7 @@ where } // Update rewards to given relayers, optionally rewarding confirmation relayer. -fn register_relayers_rewards, I: 'static>( +fn register_relayers_rewards( confirmation_relayer: &T::AccountId, relayers_rewards: RelayersRewards, lane_id: RewardsAccountParams, @@ -86,7 +84,7 @@ fn register_relayers_rewards, I: 'static>( let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee); if relayer != *confirmation_relayer { - Pallet::::register_relayer_reward(lane_id, &relayer, relayer_reward); + Pallet::::register_relayer_reward(lane_id, &relayer, relayer_reward); } else { confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(relayer_reward); @@ -94,7 +92,7 @@ fn register_relayers_rewards, I: 'static>( } // finally - pay reward to confirmation relayer - Pallet::::register_relayer_reward( + Pallet::::register_relayer_reward( lane_id, confirmation_relayer, confirmation_relayer_reward, @@ -117,7 +115,7 @@ mod tests { #[test] fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { run_test(|| { - register_relayers_rewards::( + register_relayers_rewards::( &RELAYER_2, relayers_rewards(), 
test_reward_account_param(), @@ -138,7 +136,7 @@ mod tests { #[test] fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() { run_test(|| { - register_relayers_rewards::( + register_relayers_rewards::( &RELAYER_3, relayers_rewards(), test_reward_account_param(), diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index b0286938f36d..55824f6a7fe7 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -56,7 +56,6 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs index ff06a1e3c8c5..3c4a10f82e7d 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs @@ -18,9 +18,9 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{Bridge, BridgeState, Call, MINIMAL_DELIVERY_FEE_FACTOR}; +use crate::{DeliveryFeeFactor, MINIMAL_DELIVERY_FEE_FACTOR}; use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError}; -use frame_support::traits::{EnsureOrigin, Get, Hooks, UnfilteredDispatchable}; +use frame_support::traits::{Get, Hooks}; use sp_runtime::traits::Zero; use xcm::prelude::*; @@ -45,35 +45,16 @@ pub trait Config: crate::Config { benchmarks_instance_pallet! 
{ on_initialize_when_non_congested { - Bridge::::put(BridgeState { - is_congested: false, - delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, - }); + DeliveryFeeFactor::::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); }: { crate::Pallet::::on_initialize(Zero::zero()) } on_initialize_when_congested { - Bridge::::put(BridgeState { - is_congested: false, - delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, - }); + DeliveryFeeFactor::::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); let _ = T::ensure_bridged_target_destination()?; T::make_congested(); }: { crate::Pallet::::on_initialize(Zero::zero()) } - - report_bridge_status { - Bridge::::put(BridgeState::default()); - - let origin: T::RuntimeOrigin = T::BridgeHubOrigin::try_successful_origin().expect("expected valid BridgeHubOrigin"); - let bridge_id = Default::default(); - let is_congested = true; - - let call = Call::::report_bridge_status { bridge_id, is_congested }; - }: { call.dispatch_bypass_filter(origin)? } - verify { - assert!(Bridge::::get().is_congested); - } } diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index 7361696faba7..fe8f5a2efdfb 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -30,10 +30,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub use bp_xcm_bridge_hub_router::{BridgeState, XcmChannelStatusProvider}; +pub use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; use codec::Encode; use frame_support::traits::Get; -use sp_core::H256; use sp_runtime::{FixedPointNumber, FixedU128, Saturating}; use sp_std::vec::Vec; use xcm::prelude::*; @@ -99,8 +98,6 @@ pub mod pallet { /// Checks the XCM version for the destination. type DestinationVersion: GetVersion; - /// Origin of the sibling bridge hub that is allowed to report bridge status. 
- type BridgeHubOrigin: EnsureOrigin; /// Actual message sender (`HRMP` or `DMP`) to the sibling bridge hub location. type ToBridgeHubSender: SendXcm; /// Local XCM channel manager. @@ -123,112 +120,95 @@ pub mod pallet { return T::WeightInfo::on_initialize_when_congested() } - // if bridge has reported congestion, we don't change anything - let mut bridge = Self::bridge(); - if bridge.is_congested { - return T::WeightInfo::on_initialize_when_congested() - } - // if we can't decrease the delivery fee factor anymore, we don't change anything - if bridge.delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { + let mut delivery_fee_factor = Self::delivery_fee_factor(); + if delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { return T::WeightInfo::on_initialize_when_congested() } - let previous_factor = bridge.delivery_fee_factor; - bridge.delivery_fee_factor = - MINIMAL_DELIVERY_FEE_FACTOR.max(bridge.delivery_fee_factor / EXPONENTIAL_FEE_BASE); - + let previous_factor = delivery_fee_factor; + delivery_fee_factor = + MINIMAL_DELIVERY_FEE_FACTOR.max(delivery_fee_factor / EXPONENTIAL_FEE_BASE); log::info!( target: LOG_TARGET, "Bridge channel is uncongested. Decreased fee factor from {} to {}", previous_factor, - bridge.delivery_fee_factor, + delivery_fee_factor, ); Self::deposit_event(Event::DeliveryFeeFactorDecreased { - new_value: bridge.delivery_fee_factor, + new_value: delivery_fee_factor, }); - Bridge::::put(bridge); + DeliveryFeeFactor::::put(delivery_fee_factor); T::WeightInfo::on_initialize_when_non_congested() } } - #[pallet::call] - impl, I: 'static> Pallet { - /// Notification about congested bridge queue. 
- #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_bridge_status())] - pub fn report_bridge_status( - origin: OriginFor, - // this argument is not currently used, but to ease future migration, we'll keep it - // here - bridge_id: H256, - is_congested: bool, - ) -> DispatchResult { - let _ = T::BridgeHubOrigin::ensure_origin(origin)?; - - log::info!( - target: LOG_TARGET, - "Received bridge status from {:?}: congested = {}", - bridge_id, - is_congested, - ); - - Bridge::::mutate(|bridge| { - bridge.is_congested = is_congested; - }); - Ok(()) - } + /// Initialization value for the delivery fee factor. + #[pallet::type_value] + pub fn InitialFactor() -> FixedU128 { + MINIMAL_DELIVERY_FEE_FACTOR } - /// Bridge that we are using. + /// The number to multiply the base delivery fee by. /// - /// **bridges-v1** assumptions: all outbound messages through this router are using single lane - /// and to single remote consensus. If there is some other remote consensus that uses the same - /// bridge hub, the separate pallet instance shall be used, In `v2` we'll have all required - /// primitives (lane-id aka bridge-id, derived from XCM locations) to support multiple bridges - /// by the same pallet instance. + /// This factor is shared by all bridges, served by this pallet. For example, if this + /// chain (`Config::UniversalLocation`) opens two bridges ( + /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(1000))` and + /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(2000))`), then they + /// both will be sharing the same fee factor. 
This is because both bridges are sharing + /// the same local XCM channel with the child/sibling bridge hub, which we are using + /// to detect congestion: + /// + /// ```nocompile + /// ThisChain --- Local XCM channel --> Sibling Bridge Hub ------ + /// | | + /// | | + /// | | + /// Lane1 Lane2 + /// | | + /// | | + /// | | + /// \ / | + /// Parachain1 <-- Local XCM channel --- Remote Bridge Hub <------ + /// | + /// | + /// Parachain1 <-- Local XCM channel --------- + /// ``` + /// + /// If at least one of other channels is congested, the local XCM channel with sibling + /// bridge hub eventually becomes congested too. And we have no means to detect - which + /// bridge exactly causes the congestion. So the best solution here is not to make + /// any differences between all bridges, started by this chain. #[pallet::storage] - #[pallet::getter(fn bridge)] - pub type Bridge, I: 'static = ()> = StorageValue<_, BridgeState, ValueQuery>; + #[pallet::getter(fn delivery_fee_factor)] + pub type DeliveryFeeFactor, I: 'static = ()> = + StorageValue<_, FixedU128, ValueQuery, InitialFactor>; impl, I: 'static> Pallet { /// Called when new message is sent (queued to local outbound XCM queue) over the bridge. 
pub(crate) fn on_message_sent_to_bridge(message_size: u32) { - log::trace!( - target: LOG_TARGET, - "on_message_sent_to_bridge - message_size: {message_size:?}", - ); - let _ = Bridge::::try_mutate(|bridge| { - let is_channel_with_bridge_hub_congested = - T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()); - let is_bridge_congested = bridge.is_congested; - - // if outbound queue is not congested AND bridge has not reported congestion, do - // nothing - if !is_channel_with_bridge_hub_congested && !is_bridge_congested { - return Err(()) - } - - // ok - we need to increase the fee factor, let's do that - let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) - .saturating_mul(MESSAGE_SIZE_FEE_BASE); - let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); - let previous_factor = bridge.delivery_fee_factor; - bridge.delivery_fee_factor = - bridge.delivery_fee_factor.saturating_mul(total_factor); + // if outbound channel is not congested, do nothing + if !T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()) { + return + } + // ok - we need to increase the fee factor, let's do that + let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) + .saturating_mul(MESSAGE_SIZE_FEE_BASE); + let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); + DeliveryFeeFactor::::mutate(|f| { + let previous_factor = *f; + *f = f.saturating_mul(total_factor); log::info!( target: LOG_TARGET, "Bridge channel is congested. 
Increased fee factor from {} to {}", previous_factor, - bridge.delivery_fee_factor, + f, ); - Self::deposit_event(Event::DeliveryFeeFactorIncreased { - new_value: bridge.delivery_fee_factor, - }); - Ok(()) + Self::deposit_event(Event::DeliveryFeeFactorIncreased { new_value: *f }); + *f }); } } @@ -330,9 +310,9 @@ impl, I: 'static> ExporterFor for Pallet { let message_size = message.encoded_size(); let message_fee = (message_size as u128).saturating_mul(T::ByteFee::get()); let fee_sum = base_fee.saturating_add(message_fee); - let fee_factor = Self::bridge().delivery_fee_factor; - let fee = fee_factor.saturating_mul_int(fee_sum); + let fee_factor = Self::delivery_fee_factor(); + let fee = fee_factor.saturating_mul_int(fee_sum); let fee = if fee > 0 { Some((T::FeeAsset::get(), fee).into()) } else { None }; log::info!( @@ -447,47 +427,24 @@ mod tests { use frame_system::{EventRecord, Phase}; use sp_runtime::traits::One; - fn congested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { - BridgeState { is_congested: true, delivery_fee_factor } - } - - fn uncongested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { - BridgeState { is_congested: false, delivery_fee_factor } - } - #[test] fn initial_fee_factor_is_one() { run_test(|| { - assert_eq!( - Bridge::::get(), - uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR), - ); + assert_eq!(DeliveryFeeFactor::::get(), MINIMAL_DELIVERY_FEE_FACTOR); }) } #[test] fn fee_factor_is_not_decreased_from_on_initialize_when_xcm_channel_is_congested() { run_test(|| { - Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); + DeliveryFeeFactor::::put(FixedU128::from_rational(125, 100)); TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); // it should not decrease, because queue is congested - let old_delivery = XcmBridgeHubRouter::bridge(); + let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); XcmBridgeHubRouter::on_initialize(One::one()); - 
assert_eq!(XcmBridgeHubRouter::bridge(), old_delivery); - assert_eq!(System::events(), vec![]); - }) - } - - #[test] - fn fee_factor_is_not_decreased_from_on_initialize_when_bridge_has_reported_congestion() { - run_test(|| { - Bridge::::put(congested_bridge(FixedU128::from_rational(125, 100))); + assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), old_delivery_fee_factor); - // it should not decrease, because bridge congested - let old_bridge = XcmBridgeHubRouter::bridge(); - XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::bridge(), old_bridge); assert_eq!(System::events(), vec![]); }) } @@ -496,19 +453,16 @@ mod tests { fn fee_factor_is_decreased_from_on_initialize_when_xcm_channel_is_uncongested() { run_test(|| { let initial_fee_factor = FixedU128::from_rational(125, 100); - Bridge::::put(uncongested_bridge(initial_fee_factor)); + DeliveryFeeFactor::::put(initial_fee_factor); - // it should eventually decrease to one - while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { + // it should eventually decrease to one + while XcmBridgeHubRouter::delivery_fee_factor() > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } - // verify that it doesn't decrease anymore + // verify that it doesn't decrease anymore XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!( - XcmBridgeHubRouter::bridge(), - uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR) - ); + assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), MINIMAL_DELIVERY_FEE_FACTOR); // check emitted event let first_system_event = System::events().first().cloned(); @@ -628,7 +582,7 @@ mod tests { // but when factor is larger than one, it increases the fee, so it becomes: // `(BASE_FEE + BYTE_FEE * msg_size) * F + HRMP_FEE` let factor = FixedU128::from_rational(125, 100); - Bridge::::put(uncongested_bridge(factor)); + DeliveryFeeFactor::::put(factor); let expected_fee = (FixedU128::saturating_from_integer(BASE_FEE + 
BYTE_FEE * (msg_size as u128)) * factor) @@ -644,7 +598,7 @@ mod tests { #[test] fn sent_message_doesnt_increase_factor_if_queue_is_uncongested() { run_test(|| { - let old_bridge = XcmBridgeHubRouter::bridge(); + let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); assert_eq!( send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), @@ -655,7 +609,7 @@ mod tests { ); assert!(TestToBridgeHubSender::is_message_sent()); - assert_eq!(old_bridge, XcmBridgeHubRouter::bridge()); + assert_eq!(old_delivery_fee_factor, XcmBridgeHubRouter::delivery_fee_factor()); assert_eq!(System::events(), vec![]); }); @@ -666,39 +620,7 @@ mod tests { run_test(|| { TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); - let old_bridge = XcmBridgeHubRouter::bridge(); - assert_ok!(send_xcm::( - Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), - vec![ClearOrigin].into(), - ) - .map(drop)); - - assert!(TestToBridgeHubSender::is_message_sent()); - assert!( - old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor - ); - - // check emitted event - let first_system_event = System::events().first().cloned(); - assert!(matches!( - first_system_event, - Some(EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::XcmBridgeHubRouter( - Event::DeliveryFeeFactorIncreased { .. } - ), - .. 
- }) - )); - }); - } - - #[test] - fn sent_message_increases_factor_if_bridge_has_reported_congestion() { - run_test(|| { - Bridge::::put(congested_bridge(MINIMAL_DELIVERY_FEE_FACTOR)); - - let old_bridge = XcmBridgeHubRouter::bridge(); + let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); assert_ok!(send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), vec![ClearOrigin].into(), @@ -706,9 +628,7 @@ mod tests { .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); - assert!( - old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor - ); + assert!(old_delivery_fee_factor < XcmBridgeHubRouter::delivery_fee_factor()); // check emitted event let first_system_event = System::events().first().cloned(); diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index ac642e108c2a..095572883920 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -80,7 +80,6 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type DestinationVersion = LatestOrNoneForLocationVersionChecker>; - type BridgeHubOrigin = frame_system::EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; type LocalXcmChannelManager = TestLocalXcmChannelManager; diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index 8f5012c9de26..d9a0426fecaf 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -52,7 +52,6 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn on_initialize_when_non_congested() -> Weight; fn on_initialize_when_congested() -> Weight; - fn report_bridge_status() -> Weight; } /// Weights for `pallet_xcm_bridge_hub_router` that are generated using one of the Bridge testnets. 
@@ -86,19 +85,6 @@ impl WeightInfo for BridgeWeight { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `1502` - // Minimum execution time: 10_427 nanoseconds. - Weight::from_parts(10_682_000, 1502) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } } // For backwards compatibility and tests @@ -134,17 +120,4 @@ impl WeightInfo for () { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `1502` - // Minimum execution time: 10_427 nanoseconds. 
- Weight::from_parts(10_682_000, 1502) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index b5e365874443..fe58b910a94e 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -34,13 +34,12 @@ xcm-builder = { workspace = true } xcm-executor = { workspace = true } [dev-dependencies] -bp-header-chain = { workspace = true } -bp-runtime = { workspace = true } -bp-xcm-bridge-hub-router = { workspace = true } pallet-balances = { workspace = true } +sp-io = { workspace = true } +bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } pallet-xcm-bridge-hub-router = { workspace = true } polkadot-parachain-primitives = { workspace = true } -sp-io = { workspace = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "bp-header-chain/std", "bp-messages/std", "bp-runtime/std", - "bp-xcm-bridge-hub-router/std", "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", @@ -77,7 +75,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/modules/xcm-bridge-hub/src/exporter.rs b/bridges/modules/xcm-bridge-hub/src/exporter.rs index 93b6093b42af..5afb9f36bc94 100644 --- a/bridges/modules/xcm-bridge-hub/src/exporter.rs +++ b/bridges/modules/xcm-bridge-hub/src/exporter.rs @@ -364,7 +364,7 @@ mod tests { use bp_runtime::RangeInclusiveExt; use bp_xcm_bridge_hub::{Bridge, BridgeLocations, BridgeState}; - use frame_support::{assert_ok, traits::EnsureOrigin}; + use frame_support::assert_ok; use pallet_bridge_messages::InboundLaneStorage; use xcm_builder::{NetworkExportTable, UnpaidRemoteExporter}; use xcm_executor::traits::{export_xcm, ConvertLocation}; @@ -381,8 +381,9 @@ mod tests { 
BridgedUniversalDestination::get() } - fn open_lane(origin: RuntimeOrigin) -> (BridgeLocations, TestLaneIdType) { + fn open_lane() -> (BridgeLocations, TestLaneIdType) { // open expected outbound lane + let origin = OpenBridgeOrigin::sibling_parachain_origin(); let with = bridged_asset_hub_universal_location(); let locations = XcmOverBridge::bridge_locations_from_origin(origin, Box::new(with.into())).unwrap(); @@ -438,7 +439,7 @@ mod tests { } fn open_lane_and_send_regular_message() -> (BridgeId, TestLaneIdType) { - let (locations, lane_id) = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); + let (locations, lane_id) = open_lane(); // now let's try to enqueue message using our `ExportXcm` implementation export_xcm::( @@ -472,7 +473,7 @@ mod tests { fn exporter_does_not_suspend_the_bridge_if_outbound_bridge_queue_is_not_congested() { run_test(|| { let (bridge_id, _) = open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -489,7 +490,7 @@ mod tests { } open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); }); } @@ -501,11 +502,11 @@ mod tests { open_lane_and_send_regular_message(); } - assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); open_lane_and_send_regular_message(); - assert!(TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert!(TestLocalXcmChannelManager::is_bridge_suspened()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -522,7 +523,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD + 1, ); - 
assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -536,7 +537,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -553,7 +554,7 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert!(TestLocalXcmChannelManager::is_bridge_resumed()); assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -647,10 +648,7 @@ mod tests { let dest = Location::new(2, BridgedUniversalDestination::get()); // open bridge - let origin = OpenBridgeOrigin::sibling_parachain_origin(); - let origin_as_location = - OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); - let (_, expected_lane_id) = open_lane(origin); + let (_, expected_lane_id) = open_lane(); // check before - no messages assert_eq!( @@ -664,24 +662,18 @@ mod tests { ); // send `ExportMessage(message)` by `UnpaidRemoteExporter`. - ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location); + TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); assert_ok!(send_xcm::< UnpaidRemoteExporter< NetworkExportTable, - ExecuteXcmOverSendXcm, + TestExportXcmWithXcmOverBridge, UniversalLocation, >, >(dest.clone(), Xcm::<()>::default())); - // we need to set `UniversalLocation` for `sibling_parachain_origin` for - // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. - ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. 
- ExecuteXcmOverSendXcm::set_origin_for_execute(SiblingLocation::get()); - assert_ok!(send_xcm::( - dest.clone(), - Xcm::<()>::default() - )); + TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); + assert_ok!(send_xcm::(dest.clone(), Xcm::<()>::default())); // check after - a message ready to be relayed assert_eq!( @@ -773,7 +765,7 @@ mod tests { ); // ok - let _ = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); + let _ = open_lane(); let mut dest_wrapper = Some(bridged_relative_destination()); assert_ok!(XcmOverBridge::validate( BridgedRelayNetwork::get(), @@ -788,77 +780,4 @@ mod tests { assert_eq!(None, dest_wrapper); }); } - - #[test] - fn congestion_with_pallet_xcm_bridge_hub_router_works() { - run_test(|| { - // valid routable destination - let dest = Location::new(2, BridgedUniversalDestination::get()); - - fn router_bridge_state() -> pallet_xcm_bridge_hub_router::BridgeState { - pallet_xcm_bridge_hub_router::Bridge::< - TestRuntime, - XcmOverBridgeWrappedWithExportMessageRouterInstance, - >::get() - } - - // open two bridges - let origin = OpenBridgeOrigin::sibling_parachain_origin(); - let origin_as_location = - OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); - let (bridge_1, expected_lane_id_1) = open_lane(origin); - - // we need to set `UniversalLocation` for `sibling_parachain_origin` for - // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. 
- ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); - - // check before - // bridges are opened - assert_eq!( - XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, - BridgeState::Opened - ); - - // the router is uncongested - assert!(!router_bridge_state().is_congested); - assert!(!TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id())); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); - - // make bridges congested with sending too much messages - for _ in 1..(OUTBOUND_LANE_CONGESTED_THRESHOLD + 2) { - // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. - ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location.clone()); - assert_ok!(send_xcm::( - dest.clone(), - Xcm::<()>::default() - )); - } - - // checks after - // bridges are suspended - assert_eq!( - XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, - BridgeState::Suspended, - ); - // the router is congested - assert!(router_bridge_state().is_congested); - assert!(TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id())); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); - - // make bridges uncongested to trigger resume signal - XcmOverBridge::on_bridge_messages_delivered( - expected_lane_id_1, - OUTBOUND_LANE_UNCONGESTED_THRESHOLD, - ); - - // bridge is again opened - assert_eq!( - XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, - BridgeState::Opened - ); - // the router is uncongested - assert!(!router_bridge_state().is_congested); - assert!(TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); - }) - } } diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs index 682db811efa7..1b2536598a20 100644 --- a/bridges/modules/xcm-bridge-hub/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -145,8 +145,8 @@ use bp_messages::{LaneState, MessageNonce}; use 
bp_runtime::{AccountIdOf, BalanceOf, RangeInclusiveExt}; -pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState, LocalXcmChannelManager}; -use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError}; +pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState}; +use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError, LocalXcmChannelManager}; use frame_support::{traits::fungible::MutateHold, DefaultNoBound}; use frame_system::Config as SystemConfig; use pallet_bridge_messages::{Config as BridgeMessagesConfig, LanesManagerError}; diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs index d186507dab17..9f06b99ef6d5 100644 --- a/bridges/modules/xcm-bridge-hub/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub/src/mock.rs @@ -24,10 +24,10 @@ use bp_messages::{ }; use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, HashOf}; use bp_xcm_bridge_hub::{BridgeId, LocalXcmChannelManager}; -use codec::{Decode, Encode}; +use codec::Encode; use frame_support::{ assert_ok, derive_impl, parameter_types, - traits::{EnsureOrigin, Equals, Everything, Get, OriginTrait}, + traits::{EnsureOrigin, Equals, Everything, OriginTrait}, weights::RuntimeDbWeight, }; use polkadot_parachain_primitives::primitives::Sibling; @@ -44,7 +44,7 @@ use xcm_builder::{ InspectMessageQueues, NetworkExportTable, NetworkExportTableItem, ParentIsPreset, SiblingParachainConvertsVia, }; -use xcm_executor::{traits::ConvertOrigin, XcmExecutor}; +use xcm_executor::XcmExecutor; pub type AccountId = AccountId32; pub type Balance = u64; @@ -63,7 +63,7 @@ frame_support::construct_runtime! 
{ Balances: pallet_balances::{Pallet, Event}, Messages: pallet_bridge_messages::{Pallet, Call, Event}, XcmOverBridge: pallet_xcm_bridge_hub::{Pallet, Call, HoldReason, Event}, - XcmOverBridgeWrappedWithExportMessageRouter: pallet_xcm_bridge_hub_router = 57, + XcmOverBridgeRouter: pallet_xcm_bridge_hub_router, } } @@ -208,27 +208,17 @@ impl pallet_xcm_bridge_hub::Config for TestRuntime { type BlobDispatcher = TestBlobDispatcher; } -/// A router instance simulates a scenario where the router is deployed on a different chain than -/// the `MessageExporter`. This means that the router sends an `ExportMessage`. -pub type XcmOverBridgeWrappedWithExportMessageRouterInstance = (); -impl pallet_xcm_bridge_hub_router::Config - for TestRuntime -{ +impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); - type UniversalLocation = ExportMessageOriginUniversalLocation; + type UniversalLocation = UniversalLocation; type SiblingBridgeHubLocation = BridgeHubLocation; type BridgedNetworkId = BridgedRelayNetwork; type Bridges = NetworkExportTable; type DestinationVersion = AlwaysLatest; - // We convert to root `here` location with `BridgeHubLocationXcmOriginAsRoot` - type BridgeHubOrigin = frame_system::EnsureRoot; - // **Note**: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which - // calls the `ExportXcm` implementation of `pallet_xcm_bridge_hub` as the - // `MessageExporter`. 
- type ToBridgeHubSender = ExecuteXcmOverSendXcm; + type ToBridgeHubSender = TestExportXcmWithXcmOverBridge; type LocalXcmChannelManager = TestLocalXcmChannelManager; type ByteFee = ConstU128<0>; @@ -240,7 +230,7 @@ impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; type XcmSender = (); type AssetTransactor = (); - type OriginConverter = BridgeHubLocationXcmOriginAsRoot; + type OriginConverter = (); type IsReserve = (); type IsTeleporter = (); type UniversalLocation = UniversalLocation; @@ -280,8 +270,8 @@ thread_local! { /// /// Note: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which calls the /// `ExportXcm` implementation of `pallet_xcm_bridge_hub` as `MessageExporter`. -pub struct ExecuteXcmOverSendXcm; -impl SendXcm for ExecuteXcmOverSendXcm { +pub struct TestExportXcmWithXcmOverBridge; +impl SendXcm for TestExportXcmWithXcmOverBridge { type Ticket = Xcm<()>; fn validate( @@ -308,7 +298,7 @@ impl SendXcm for ExecuteXcmOverSendXcm { Ok(hash) } } -impl InspectMessageQueues for ExecuteXcmOverSendXcm { +impl InspectMessageQueues for TestExportXcmWithXcmOverBridge { fn clear_messages() { todo!() } @@ -317,51 +307,12 @@ impl InspectMessageQueues for ExecuteXcmOverSendXcm { todo!() } } -impl ExecuteXcmOverSendXcm { +impl TestExportXcmWithXcmOverBridge { pub fn set_origin_for_execute(origin: Location) { EXECUTE_XCM_ORIGIN.with(|o| *o.borrow_mut() = Some(origin)); } } -/// A dynamic way to set different universal location for the origin which sends `ExportMessage`. 
-pub struct ExportMessageOriginUniversalLocation; -impl ExportMessageOriginUniversalLocation { - pub(crate) fn set(universal_location: Option) { - EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| *o.borrow_mut() = universal_location); - } -} -impl Get for ExportMessageOriginUniversalLocation { - fn get() -> InteriorLocation { - EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| { - o.borrow() - .clone() - .expect("`EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION` is not set!") - }) - } -} -thread_local! { - pub static EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION: RefCell> = RefCell::new(None); -} - -pub struct BridgeHubLocationXcmOriginAsRoot( - sp_std::marker::PhantomData, -); -impl ConvertOrigin - for BridgeHubLocationXcmOriginAsRoot -{ - fn convert_origin( - origin: impl Into, - kind: OriginKind, - ) -> Result { - let origin = origin.into(); - if kind == OriginKind::Xcm && origin.eq(&BridgeHubLocation::get()) { - Ok(RuntimeOrigin::root()) - } else { - Err(origin) - } - } -} - /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. 
@@ -445,9 +396,6 @@ impl EnsureOrigin for OpenBridgeOrigin { } } -pub(crate) type OpenBridgeOriginOf = - >::OpenBridgeOrigin; - pub struct TestLocalXcmChannelManager; impl TestLocalXcmChannelManager { @@ -455,82 +403,30 @@ impl TestLocalXcmChannelManager { frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Congested", &true); } - fn suspended_key(bridge: &BridgeId) -> Vec { - [b"TestLocalXcmChannelManager.Suspended", bridge.encode().as_slice()].concat() - } - fn resumed_key(bridge: &BridgeId) -> Vec { - [b"TestLocalXcmChannelManager.Resumed", bridge.encode().as_slice()].concat() - } - - pub fn is_bridge_suspended(bridge: &BridgeId) -> bool { - frame_support::storage::unhashed::get_or_default(&Self::suspended_key(bridge)) + pub fn is_bridge_suspened() -> bool { + frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Suspended") } - pub fn is_bridge_resumed(bridge: &BridgeId) -> bool { - frame_support::storage::unhashed::get_or_default(&Self::resumed_key(bridge)) - } - - fn build_congestion_message(bridge: &BridgeId, is_congested: bool) -> Vec> { - use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - #[allow(clippy::large_enum_variant)] - #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, scale_info::TypeInfo)] - enum Call { - #[codec(index = 57)] - XcmOverBridgeWrappedWithExportMessageRouter(XcmBridgeHubRouterCall), - } - - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - fallback_max_weight: None, - call: Call::XcmOverBridgeWrappedWithExportMessageRouter( - XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: bridge.inner(), - is_congested, - } - ) - .encode() - .into(), - }, - ExpectTransactStatus(MaybeErrorCode::Success), - ] - } - - fn report_bridge_status( - local_origin: &Location, - bridge: &BridgeId, - is_congested: bool, - key: Vec, - ) -> Result<(), SendError> { - // send as BridgeHub would send to sibling chain - 
ExecuteXcmOverSendXcm::set_origin_for_execute(BridgeHubLocation::get()); - let result = send_xcm::( - local_origin.clone(), - Self::build_congestion_message(&bridge, is_congested).into(), - ); - - if result.is_ok() { - frame_support::storage::unhashed::put(&key, &true); - } - - result.map(|_| ()) + pub fn is_bridge_resumed() -> bool { + frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Resumed") } } impl LocalXcmChannelManager for TestLocalXcmChannelManager { - type Error = SendError; + type Error = (); fn is_congested(_with: &Location) -> bool { frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Congested") } - fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - Self::report_bridge_status(local_origin, &bridge, true, Self::suspended_key(&bridge)) + fn suspend_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> { + frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Suspended", &true); + Ok(()) } - fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - Self::report_bridge_status(local_origin, &bridge, false, Self::resumed_key(&bridge)) + fn resume_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> { + frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Resumed", &true); + Ok(()) } } diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index b32cf1e407eb..404acaff30af 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -23,10 +23,10 @@ bp-runtime = { workspace = true } # Substrate Dependencies binary-merkle-tree = { workspace = true } +sp-consensus-beefy = { workspace = true } frame-support = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-mmr = { workspace = true } -sp-consensus-beefy = { workspace = true } sp-runtime = { workspace = true } sp-std = { 
workspace = true } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index b17dcb2f7491..081bda479495 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -23,8 +23,8 @@ bp-runtime = { workspace = true } # Substrate Dependencies frame-support = { workspace = true } -sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-std = { workspace = true } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index dd1bd083371f..87c8cbe88180 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -16,19 +16,19 @@ scale-info = { features = ["bit-vec", "derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-header-chain = { workspace = true } bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # Substrate Dependencies frame-support = { workspace = true } sp-core = { workspace = true } -sp-io = { workspace = true } sp-std = { workspace = true } +sp-io = { workspace = true } [dev-dependencies] -bp-runtime = { workspace = true } hex = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } +bp-runtime = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 9219bae1e131..34be38bed4ac 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -21,8 +21,8 @@ bp-parachains = { workspace = true } bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { workspace = true } frame-system = { workspace = true } +frame-support = { 
workspace = true } pallet-utility = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index b8a21ec35024..ba0c51152bd2 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -15,8 +15,8 @@ codec = { features = ["bit-vec", "derive"], workspace = true } scale-info = { features = ["bit-vec", "derive"], workspace = true } # Substrate Dependencies -sp-core = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index 800e2a3da3a3..79201a8756f9 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -20,10 +20,10 @@ bp-messages = { workspace = true } bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +frame-support = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } diff --git a/bridges/primitives/xcm-bridge-hub/src/lib.rs b/bridges/primitives/xcm-bridge-hub/src/lib.rs index 471cf402c34f..63beb1bc3041 100644 --- a/bridges/primitives/xcm-bridge-hub/src/lib.rs +++ b/bridges/primitives/xcm-bridge-hub/src/lib.rs @@ -87,11 +87,6 @@ impl BridgeId { .into(), ) } - - /// Access the inner representation. 
- pub fn inner(&self) -> H256 { - self.0 - } } impl core::fmt::Debug for BridgeId { diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index 6a59688b2d8c..6065c23773e3 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -18,16 +18,16 @@ futures = { workspace = true } jsonrpsee = { features = ["macros", "ws-client"], workspace = true } log = { workspace = true } num-traits = { workspace = true, default-features = true } -quick_cache = { workspace = true } rand = { workspace = true, default-features = true } +serde_json = { workspace = true } scale-info = { features = [ "derive", ], workspace = true, default-features = true } -serde_json = { workspace = true } -thiserror = { workspace = true } tokio = { features = [ "rt-multi-thread", ], workspace = true, default-features = true } +thiserror = { workspace = true } +quick_cache = { workspace = true } # Bridge dependencies diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index b418a2a3abb8..b0f93e5b5485 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -32,29 +32,29 @@ bp-relayers = { workspace = true, default-features = true } equivocation-detector = { workspace = true } finality-relay = { workspace = true } -messages-relay = { workspace = true } parachains-relay = { workspace = true } -relay-substrate-client = { workspace = true } relay-utils = { workspace = true } +messages-relay = { workspace = true } +relay-substrate-client = { workspace = true } pallet-bridge-grandpa = { workspace = true, default-features = true } pallet-bridge-messages = { workspace = true, default-features = true } pallet-bridge-parachains = { workspace = true, default-features = true } -bp-messages = { workspace = true, default-features = true } bp-runtime = { workspace = true, default-features = true } +bp-messages = { 
workspace = true, default-features = true } # Substrate Dependencies frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-grandpa = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-trie = { workspace = true } [dev-dependencies] +scale-info = { features = ["derive"], workspace = true } pallet-transaction-payment = { workspace = true, default-features = true } relay-substrate-client = { features = ["test-helpers"], workspace = true } -scale-info = { features = ["derive"], workspace = true } diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index 8592ca780eaa..4c25566607dc 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -16,18 +16,18 @@ async-std = { workspace = true } async-trait = { workspace = true } backoff = { workspace = true } console = { workspace = true } -futures = { workspace = true } isahc = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +futures = { workspace = true } jsonpath_lib = { workspace = true } log = { workspace = true } num-traits = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sysinfo = { workspace = true } -thiserror = { workspace = true } time = { features = ["formatting", "local-offset", "std"], workspace = true } tokio = { features = ["rt"], workspace = true, default-features = true } +thiserror = { workspace = true } # Bridge dependencies @@ -35,5 +35,5 @@ bp-runtime = { 
workspace = true, default-features = true } # Substrate dependencies -prometheus-endpoint = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs index deb9b9d059d5..564ed1f0e5cc 100644 --- a/bridges/relays/utils/src/initialize.rs +++ b/bridges/relays/utils/src/initialize.rs @@ -52,10 +52,9 @@ pub fn initialize_logger(with_timestamp: bool) { format, ); - let env_filter = EnvFilter::builder() - .with_default_directive(Level::WARN.into()) - .with_default_directive("bridge=info".parse().expect("static filter string is valid")) - .from_env_lossy(); + let env_filter = EnvFilter::from_default_env() + .add_directive(Level::WARN.into()) + .add_directive("bridge=info".parse().expect("static filter string is valid")); let builder = SubscriberBuilder::default().with_env_filter(env_filter); diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index ebd8a1c6ed11..262d9a7f380d 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -15,37 +15,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde = { optional = true, workspace = true, default-features = true } +serde_json = { optional = true, workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } -serde_json = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = 
true } frame-support = { workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } -sp-io = { optional = true, workspace = true } -sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { optional = true, workspace = true } -pallet-timestamp = { optional = true, workspace = true } -snowbridge-beacon-primitives = { workspace = true } snowbridge-core = { workspace = true } snowbridge-ethereum = { workspace = true } snowbridge-pallet-ethereum-client-fixtures = { optional = true, workspace = true } +snowbridge-beacon-primitives = { workspace = true } static_assertions = { workspace = true } +pallet-timestamp = { optional = true, workspace = true } [dev-dependencies] -hex-literal = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -serde = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } snowbridge-pallet-ethereum-client-fixtures = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml index 74bfe580ec36..87f0cf9a5513 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = { workspace = true, default-features = true } 
-snowbridge-beacon-primitives = { workspace = true } -snowbridge-core = { workspace = true } sp-core = { workspace = true } sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 5d4e8ad67662..1b08bb39b434 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -15,40 +15,42 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -alloy-core = { workspace = true, features = ["sol-types"] } +serde = { optional = true, workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-balances = { workspace = true } sp-core = { workspace = true } +sp-std = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } -snowbridge-beacon-primitives = { workspace = true } snowbridge-core = { workspace = true } -snowbridge-pallet-inbound-queue-fixtures = { optional = true, workspace = true } snowbridge-router-primitives = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { optional = true, workspace = true } [dev-dependencies] 
frame-benchmarking = { workspace = true, default-features = true } -hex-literal = { workspace = true, default-features = true } -snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] std = [ - "alloy-core/std", + "alloy-primitives/std", + "alloy-sol-types/std", "codec/std", "frame-benchmarking/std", "frame-support/std", @@ -81,7 +83,6 @@ runtime-benchmarks = [ "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index c698dbbf1003..6162a17728b6 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = { workspace = true, default-features = true } -snowbridge-beacon-primitives = { workspace = true } -snowbridge-core = { workspace = true } sp-core = { workspace = true } sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs index d213c8aad648..31a8992442d8 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs @@ -5,7 +5,8 @@ use snowbridge_core::{inbound::Log, ChannelId}; use sp_core::{RuntimeDebug, H160, H256}; use sp_std::prelude::*; -use alloy_core::{primitives::B256, 
sol, sol_types::SolEvent}; +use alloy_primitives::B256; +use alloy_sol_types::{sol, SolEvent}; sol! { event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload); @@ -35,7 +36,7 @@ impl TryFrom<&Log> for Envelope { fn try_from(log: &Log) -> Result { let topics: Vec = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); - let event = OutboundMessageAccepted::decode_raw_log(topics, &log.data, true) + let event = OutboundMessageAccepted::decode_log(topics, &log.data, true) .map_err(|_| EnvelopeDecodeError)?; Ok(Self { @@ -43,7 +44,7 @@ impl TryFrom<&Log> for Envelope { channel_id: ChannelId::from(event.channel_id.as_ref()), nonce: event.nonce, message_id: H256::from(event.message_id.as_ref()), - payload: event.payload.into(), + payload: event.payload, }) } } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index eed0656e9ca7..675d4b691593 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -248,6 +248,20 @@ impl inbound_queue::Config for Test { type AssetTransactor = SuccessfulTransactor; } +pub fn last_events(n: usize) -> Vec { + frame_system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() +} + +pub fn expect_events(e: Vec) { + assert_eq!(last_events(e.len()), e); +} + pub fn setup() { System::set_block_number(1); Balances::mint_into( diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs index aa99d63b4bf9..76d0b98e9eb4 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs @@ -5,11 +5,11 @@ use super::*; use frame_support::{assert_noop, assert_ok}; use hex_literal::hex; use snowbridge_core::{inbound::Proof, ChannelId}; -use sp_keyring::Sr25519Keyring as Keyring; +use 
sp_keyring::AccountKeyring as Keyring; use sp_runtime::DispatchError; use sp_std::convert::From; -use crate::Error; +use crate::{Error, Event as InboundQueueEvent}; use crate::mock::*; @@ -35,16 +35,17 @@ fn test_submit_happy_path() { assert_eq!(Balances::balance(&channel_sovereign), initial_fund); assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - - let events = frame_system::Pallet::::events(); - assert!( - events.iter().any(|event| matches!( - event.event, - RuntimeEvent::InboundQueue(Event::MessageReceived { nonce, ..}) - if nonce == 1 - )), - "no event emit." - ); + expect_events(vec![InboundQueueEvent::MessageReceived { + channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539") + .into(), + nonce: 1, + message_id: [ + 11, 25, 133, 51, 23, 68, 111, 211, 132, 94, 254, 17, 194, 252, 198, 233, 10, 193, + 156, 93, 72, 140, 65, 69, 79, 155, 154, 28, 141, 166, 171, 255, + ], + fee_burned: 110000000000, + } + .into()]); let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32); assert!( diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index f4910e6e6457..78546e258daa 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -15,24 +15,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde = { features = ["alloc", "derive"], workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -serde = { features = ["alloc", "derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-arithmetic = { workspace = true } sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-runtime = { 
workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } bridge-hub-common = { workspace = true } -ethabi = { workspace = true } snowbridge-core = { features = ["serde"], workspace = true } snowbridge-outbound-queue-merkle-tree = { workspace = true } +ethabi = { workspace = true } [dev-dependencies] pallet-message-queue = { workspace = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 2a0616b4f954..16241428df80 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -22,9 +22,9 @@ sp-core = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -array-bytes = { workspace = true, default-features = true } -hex = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index 18f7dde22c93..d35bdde5a81e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } frame-support = { workspace = true } -snowbridge-core = { workspace = true } snowbridge-outbound-queue-merkle-tree = { workspace = true } -sp-api = { workspace = true } -sp-std = { workspace = true } +snowbridge-core = { workspace = true } [features] default = 
["std"] diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index 3544925956b4..f1e749afb997 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -18,16 +18,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } +sp-std = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } @@ -38,10 +38,10 @@ snowbridge-core = { workspace = true } hex = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } snowbridge-pallet-outbound-queue = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] @@ -71,7 +71,6 @@ runtime-benchmarks = [ "snowbridge-pallet-outbound-queue/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index fc377b460d33..7c524dd2edad 100644 --- 
a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -snowbridge-core = { workspace = true } -sp-api = { workspace = true } sp-std = { workspace = true } +sp-api = { workspace = true } xcm = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index bf5d6838f7bb..9ced99fbf3fd 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -12,24 +12,24 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { workspace = true } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } hex = { workspace = true } -rlp = { workspace = true } +codec = { workspace = true } scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +rlp = { workspace = true } frame-support = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } sp-std = { workspace = true } +sp-io = { workspace = true } -byte-slice-cast = { workspace = true } ssz_rs = { workspace = true } ssz_rs_derive = { workspace = true } +byte-slice-cast = { workspace = true } -milagro-bls = { workspace = true } snowbridge-ethereum = { workspace = true } +milagro-bls = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 514579400aca..fa37c795b2d1 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ 
b/bridges/snowbridge/primitives/core/Cargo.toml @@ -12,10 +12,10 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] +serde = { optional = true, features = ["alloc", "derive"], workspace = true } codec = { workspace = true } -hex-literal = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, features = ["alloc", "derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true } xcm = { workspace = true } @@ -23,11 +23,11 @@ xcm-builder = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-arithmetic = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } snowbridge-beacon-primitives = { workspace = true } @@ -64,5 +64,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index 44ea2d0d222b..764ce90b8139 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -12,26 +12,26 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde-big-array = { optional = true, features = ["const-generics"], workspace = true } codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } ethbloom = { workspace = true } ethereum-types = { features = ["codec", "rlp", "serialize"], workspace = true } hex-literal = { 
workspace = true } parity-bytes = { workspace = true } rlp = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -serde-big-array = { optional = true, features = ["const-generics"], workspace = true } sp-io = { workspace = true } -sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-runtime = { workspace = true } ethabi = { workspace = true } [dev-dependencies] +wasm-bindgen-test = { workspace = true } rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -wasm-bindgen-test = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index e44cca077ef3..ee8d481cec12 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] codec = { workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } frame-support = { workspace = true } sp-core = { workspace = true } @@ -51,5 +51,4 @@ runtime-benchmarks = [ "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index bc5d401cd4f7..e03560f66e24 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -279,7 +279,6 @@ where // Call create_asset on foreign assets pallet. 
Transact { origin_kind: OriginKind::Xcm, - fallback_max_weight: Some(Weight::from_parts(400_000_000, 8_000)), call: ( create_call_index, asset_id, @@ -358,9 +357,7 @@ where }])), // Perform a deposit reserve to send to destination chain. DepositReserveAsset { - // Send over assets and unspent fees, XCM delivery fee will be charged from - // here. - assets: Wild(AllCounted(2)), + assets: Definite(vec![dest_para_fee_asset.clone(), asset].into()), dest: Location::new(1, [Parachain(dest_para_id)]), xcm: vec![ // Buy execution on target. diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index 23cd0adf1226..d47cb3cb7101 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -12,11 +12,11 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] +log = { workspace = true } codec = { workspace = true } frame-support = { workspace = true } -log = { workspace = true } -sp-arithmetic = { workspace = true } sp-std = { workspace = true } +sp-arithmetic = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -43,5 +43,4 @@ runtime-benchmarks = [ "snowbridge-core/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 184a0ff2329f..6f8e586bf5ff 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -6,8 +6,6 @@ authors = ["Snowfork "] edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,8 +17,8 @@ codec = { features = ["derive"], workspace = true } frame-support = { 
workspace = true } frame-system = { workspace = true } pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } pallet-timestamp = { workspace = true } pallet-utility = { workspace = true } sp-core = { workspace = true } @@ -92,6 +90,5 @@ runtime-benchmarks = [ "snowbridge-pallet-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] fast-runtime = [] diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 5441dd822cac..dca5062ab310 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -13,7 +13,7 @@ use parachains_runtimes_test_utils::{ use snowbridge_core::{ChannelId, ParaId}; use snowbridge_pallet_ethereum_client_fixtures::*; use sp_core::{Get, H160, U256}; -use sp_keyring::Sr25519Keyring::*; +use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header, AccountId32, DigestItem, SaturatedConversion, Saturating}; use xcm::latest::prelude::*; use xcm_executor::XcmExecutor; @@ -431,7 +431,7 @@ pub fn ethereum_extrinsic( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where @@ -567,7 +567,7 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 321f4d9f26d0..e7848fe7163c 100755 --- 
a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -7,52 +7,47 @@ source "$FRAMEWORK_PATH/utils/bridges.sh" # # Generated by: # -##[test] -#fn generate_sovereign_accounts() { -# use polkadot_parachain_primitives::primitives::Sibling; -# use sp_core::crypto::Ss58Codec; -# use staging_xcm_builder::{GlobalConsensusConvertsFor, SiblingParachainConvertsVia}; -# use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; -# use xcm_executor::traits::ConvertLocation; +# #[test] +# fn generate_sovereign_accounts() { +# use sp_core::crypto::Ss58Codec; +# use polkadot_parachain_primitives::primitives::Sibling; # -# const Rococo: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); -# const Westend: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); -# frame_support::parameter_types! { -# pub UniversalLocationAHR: InteriorLocation = [GlobalConsensus(Rococo), Parachain(1000)].into(); -# pub UniversalLocationAHW: InteriorLocation = [GlobalConsensus(Westend), Parachain(1000)].into(); -# } +# parameter_types! 
{ +# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); +# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Westend), Parachain(1000)); +# } # -# // SS58=42 -# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &Location { parents: 2, interior: GlobalConsensus(Rococo).into() }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &Location { parents: 1, interior: Parachain(1000).into() }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); +# // SS58=42 +# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); # -# // SS58=42 -# println!("GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &Location { parents: 2, interior: GlobalConsensus(Westend).into() }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &Location { parents: 1, interior: Parachain(1000).into() 
}).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -#} -GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5HmYPhRNAenHN6xnDLQDLZq71d4BgzPrdJ2sNZo8o1KXi9wr" +# // SS58=42 +# println!("GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Westend)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# } +GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" -GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT="5CtHyjQE8fbPaQeBrwaGph6qsSEtnMFBAZcAkxwnEfQkkYAq" +GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT="5He2Qdztyxxa4GoagY6q1jaiLMmKy1gXS7PdZkhfj8ZG9hk5" ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" # Expected sovereign accounts for rewards on BridgeHubs. 
@@ -120,11 +115,7 @@ ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain="5EHnXa ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain="5EHnXaT5Tnt3VGpEvc6jSgYwVToDGxLRMuYoZ8coo6GHyWbR" LANE_ID="00000002" -XCM_VERSION=5 -# 6408de7737c59c238890533af25896a2c20608d8b380bb01029acb392781063e -ROCOCO_GENESIS_HASH=[100,8,222,119,55,197,156,35,136,144,83,58,242,88,150,162,194,6,8,216,179,128,187,1,2,154,203,57,39,129,6,62] -# e143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e -WESTEND_GENESIS_HASH=[225,67,242,56,3,172,80,232,246,248,230,38,149,209,206,158,78,29,104,170,54,193,205,44,253,21,52,2,19,243,66,62] +XCM_VERSION=3 function init_ro_wnd() { local relayer_path=$(ensure_relayer) @@ -279,7 +270,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Westend" }] } }')" \ "$GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT" \ 10000000000 \ true @@ -298,7 +289,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } }')" \ $XCM_VERSION ;; init-bridge-hub-rococo-local) @@ -327,7 +318,7 @@ case "$1" in "//Alice" \ 1013 \ "ws://127.0.0.1:8943" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1002 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1002 } ] } }')" \ $XCM_VERSION ;; init-asset-hub-westend-local) @@ -338,7 +329,7 @@ case "$1" in "//Alice" \ 1000 \ 
"ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Rococo" }] } }')" \ "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ 10000000000 \ true @@ -357,7 +348,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } }')" \ $XCM_VERSION ;; init-bridge-hub-westend-local) @@ -385,7 +376,7 @@ case "$1" in "//Alice" \ 1002 \ "ws://127.0.0.1:8945" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1013 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1013 } ] } }')" \ $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) @@ -395,9 +386,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9910" \ "//Alice" \ - "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ - "$(jq --null-input '{ "V5": [ { "id": { "parents": 1, "interior": "Here" }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { 
"X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -408,9 +399,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9910" \ "//Alice" \ - "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ - "$(jq --null-input '{ "V5": [ { "id": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }] } }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -421,9 +412,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ - "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ 
"AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ - "$(jq --null-input '{ "V5": [ { "id": { "parents": 1, "interior": "Here" }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -434,9 +425,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ - "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ - "$(jq --null-input '{ "V5": [ { "id": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }] } }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { 
"id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js index 837b3a3b1dbc..7b343ed97a88 100644 --- a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js +++ b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js @@ -3,15 +3,17 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later - const accountAddress = args.accountAddress; - const expectedAssetId = args.expectedAssetId; - const expectedAssetBalance = BigInt(args.expectedAssetBalance); - + const accountAddress = args[0]; + const expectedForeignAssetBalance = BigInt(args[1]); + const bridgedNetworkName = args[2]; while (true) { - const foreignAssetAccount = await api.query.foreignAssets.account(expectedAssetId, accountAddress); + const foreignAssetAccount = await api.query.foreignAssets.account( + { parents: 2, interior: { X1: [{ GlobalConsensus: bridgedNetworkName }] } }, + accountAddress + ); if (foreignAssetAccount.isSome) { const foreignAssetAccountBalance = foreignAssetAccount.unwrap().balance.toBigInt(); - if (foreignAssetAccountBalance > expectedAssetBalance) { + if (foreignAssetAccountBalance > expectedForeignAssetBalance) { return foreignAssetAccountBalance; } } diff --git a/bridges/testing/framework/utils/bridges.sh b/bridges/testing/framework/utils/bridges.sh index 3d7b37b4ffc2..07d9e4cd50b1 100755 --- a/bridges/testing/framework/utils/bridges.sh +++ b/bridges/testing/framework/utils/bridges.sh @@ -114,7 +114,7 @@ function send_governance_transact() { local dest=$(jq --null-input \ --arg para_id "$para_id" \ - '{ "V4": { "parents": 0, "interior": { "X1": [{ "Parachain": $para_id 
}] } } }') + '{ "V3": { "parents": 0, "interior": { "X1": { "Parachain": $para_id } } } }') local message=$(jq --null-input \ --argjson hex_encoded_data $hex_encoded_data \ @@ -122,7 +122,7 @@ function send_governance_transact() { --arg require_weight_at_most_proof_size "$require_weight_at_most_proof_size" \ ' { - "V4": [ + "V3": [ { "UnpaidExecution": { "weight_limit": "Unlimited" diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index b3cafc993e54..6e26632fd9f9 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -6,7 +6,7 @@ Creds: config asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 ROC on Westend AH -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with '{ "accountAddress": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", "expectedAssetBalance": 4800000000000, "expectedAssetId": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { "ByGenesis": [100,8,222,119,55,197,156,35,136,144,83,58,242,88,150,162,194,6,8,216,179,128,187,1,2,154,203,57,39,129,6,62] } }] }}}' within 600 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 600 seconds # relayer //Ferdie is rewarded for delivering messages from Rococo BH bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw,0x00000002,0x6268726F,ThisChain,0" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl 
b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index eacac98982ab..5a8d6dabc20e 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -6,7 +6,7 @@ Creds: config asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 WND on Rococo AH -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with '{ "accountAddress": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", "expectedAssetBalance": 4800000000000, "expectedAssetId": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { "ByGenesis": [225,67,242,56,3,172,80,232,246,248,230,38,149,209,206,158,78,29,104,170,54,193,205,44,253,21,52,2,19,243,66,62] } }] }}}' within 600 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 600 seconds # relayer //Eve is rewarded for delivering messages from Westend BH bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL,0x00000002,0x62687764,ThisChain,0" within 300 seconds diff --git a/cumulus/README.md b/cumulus/README.md index 400f9481c3fb..7e145ad7b4ab 100644 --- a/cumulus/README.md +++ b/cumulus/README.md @@ -4,7 +4,7 @@ This repository contains both the Cumulus SDK and also specific chains implemented on top of this SDK. -If you only want to run a **Polkadot Parachain Node**, check out our [container section](../docs/contributor/container.md). +If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/contributor/container.md). 
## Cumulus SDK @@ -34,7 +34,7 @@ A Polkadot [collator](https://wiki.polkadot.network/docs/en/learn-collator) for `polkadot-parachain` binary (previously called `polkadot-collator`). You may run `polkadot-parachain` locally after building it or using one of the container option described -[here](../docs/contributor/container.md). +[here](./docs/contributor/container.md). ### Relay Chain Interaction To operate a parachain node, a connection to the corresponding relay chain is necessary. This can be achieved in one of @@ -60,7 +60,7 @@ polkadot-parachain \ ``` #### External Relay Chain Node -An external relay chain node is connected via WebSocket RPC by using the `--relay-chain-rpc-urls` command line +An external relay chain node is connected via WebSocket RPC by using the `--relay-chain-rpc-urls` command line argument. This option accepts one or more space-separated WebSocket URLs to a full relay chain node. By default, only the first URL will be used, with the rest as a backup in case the connection to the first node is lost. 
diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index d7af29a6bcb2..9be92960ad77 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -9,18 +9,18 @@ homepage.workspace = true description = "A tool for validating PoVs locally" [dependencies] -anyhow.workspace = true -clap = { workspace = true, features = ["derive"] } codec.workspace = true -polkadot-node-primitives.workspace = true -polkadot-parachain-primitives.workspace = true -polkadot-primitives.workspace = true +clap = { workspace = true, features = ["derive"] } sc-executor.workspace = true -sp-core.workspace = true sp-io.workspace = true +sp-core.workspace = true sp-maybe-compressed-blob.workspace = true -tracing-subscriber.workspace = true +polkadot-node-primitives.workspace = true +polkadot-parachain-primitives.workspace = true +polkadot-primitives.workspace = true +anyhow.workspace = true tracing.workspace = true +tracing-subscriber.workspace = true [lints] workspace = true diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index bdc0236e368f..9b6f6b73960b 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Parachain node CLI utilities." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -17,10 +15,10 @@ codec = { workspace = true, default-features = true } url = { workspace = true } # Substrate -sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index ff591c2d6e3a..6ebde0c2c653 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -5,22 +5,20 @@ authors.workspace = true edition.workspace = true description = "Common node-side functionality and glue code to collate parachain blocks." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] +parking_lot = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } -parking_lot = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } @@ -48,5 +46,5 @@ polkadot-node-subsystem-test-helpers = { workspace = true } # Cumulus cumulus-test-client = { workspace = true } -cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } cumulus-test-runtime = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 702230938645..0bb2de6bb9b8 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,19 +14,18 @@ async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } parking_lot = { workspace = true } +tracing = { workspace = true, default-features = true } schnellru = { workspace = true } tokio = { workspace = true, features = ["macros"] } -tracing = { workspace = true, default-features = true } # 
Substrate -prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-slots = { workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } @@ -39,25 +36,25 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus -cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-relay-chain-interface = { workspace = true, default-features = true } 
+cumulus-client-collator = { workspace = true, default-features = true } # Polkadot +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } [features] # Allows collator to use full PoV size for block building diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 41751f1db530..425151230704 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -23,32 +23,30 @@ use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_primitives::Id as ParaId; +use polkadot_primitives::{ + vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET}, + BlockId, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, + OccupiedCoreAssumption, +}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_aura::{AuraApi, Slot}; -use sp_core::crypto::Pair; +use sp_core::{crypto::Pair, U256}; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_runtime::traits::{Block as BlockT, Header as 
HeaderT, Member, One}; use sp_timestamp::Timestamp; -use std::{sync::Arc, time::Duration}; +use std::{collections::BTreeSet, sync::Arc, time::Duration}; use super::CollatorMessage; use crate::{ collator::{self as collator_util}, - collators::{ - check_validation_code_or_log, - slot_based::{ - core_selector, - relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, - }, - }, + collators::{check_validation_code_or_log, cores_scheduled_for_para}, LOG_TARGET, }; @@ -220,7 +218,7 @@ where collator_util::Collator::::new(params) }; - let mut relay_chain_data_cache = RelayChainDataCache::new(relay_client.clone(), para_id); + let mut relay_chain_fetcher = RelayChainCachingFetcher::new(relay_client.clone(), para_id); loop { // We wait here until the next slot arrives. @@ -244,7 +242,7 @@ where // Retrieve the core selector. let (core_selector, claim_queue_offset) = - match core_selector(&*para_client, parent.hash, *parent.header.number()) { + match core_selector(&*para_client, &parent).await { Ok(core_selector) => core_selector, Err(err) => { tracing::trace!( @@ -261,7 +259,7 @@ where max_pov_size, scheduled_cores, claimed_cores, - }) = relay_chain_data_cache + }) = relay_chain_fetcher .get_mut_relay_chain_data(relay_parent, claim_queue_offset) .await else { @@ -421,3 +419,119 @@ where } } } + +/// Contains relay chain data necessary for parachain block building. +#[derive(Clone)] +struct RelayChainData { + /// Current relay chain parent header. + pub relay_parent_header: RelayHeader, + /// The cores on which the para is scheduled at the configured claim queue offset. + pub scheduled_cores: Vec, + /// Maximum configured PoV size on the relay chain. + pub max_pov_size: u32, + /// The claimed cores at a relay parent. + pub claimed_cores: BTreeSet, +} + +/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block +/// hash. 
+struct RelayChainCachingFetcher { + relay_client: RI, + para_id: ParaId, + last_data: Option<(RelayHash, RelayChainData)>, +} + +impl RelayChainCachingFetcher +where + RI: RelayChainInterface + Clone + 'static, +{ + pub fn new(relay_client: RI, para_id: ParaId) -> Self { + Self { relay_client, para_id, last_data: None } + } + + /// Fetch required [`RelayChainData`] from the relay chain. + /// If this data has been fetched in the past for the incoming hash, it will reuse + /// cached data. + pub async fn get_mut_relay_chain_data( + &mut self, + relay_parent: RelayHash, + claim_queue_offset: ClaimQueueOffset, + ) -> Result<&mut RelayChainData, ()> { + match &self.last_data { + Some((last_seen_hash, _)) if *last_seen_hash == relay_parent => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); + Ok(&mut self.last_data.as_mut().expect("last_data is Some").1) + }, + _ => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); + let data = self.update_for_relay_parent(relay_parent, claim_queue_offset).await?; + self.last_data = Some((relay_parent, data)); + Ok(&mut self.last_data.as_mut().expect("last_data was just set above").1) + }, + } + } + + /// Fetch fresh data from the relay chain for the given relay parent hash. 
+ async fn update_for_relay_parent( + &self, + relay_parent: RelayHash, + claim_queue_offset: ClaimQueueOffset, + ) -> Result { + let scheduled_cores = cores_scheduled_for_para( + relay_parent, + self.para_id, + &self.relay_client, + claim_queue_offset, + ) + .await; + + let Ok(Some(relay_parent_header)) = + self.relay_client.header(BlockId::Hash(relay_parent)).await + else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); + return Err(()) + }; + + let max_pov_size = match self + .relay_client + .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(None) => return Err(()), + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + return Err(()) + }, + }; + + Ok(RelayChainData { + relay_parent_header, + scheduled_cores, + max_pov_size, + claimed_cores: BTreeSet::new(), + }) + } +} + +async fn core_selector( + para_client: &Client, + parent: &consensus_common::PotentialParent, +) -> Result<(CoreSelector, ClaimQueueOffset), sp_api::ApiError> +where + Client: ProvideRuntimeApi + Send + Sync, + Client::Api: GetCoreSelectorApi, +{ + let block_hash = parent.hash; + let runtime_api = para_client.runtime_api(); + + if runtime_api.has_api::>(block_hash)? { + Ok(runtime_api.core_selector(block_hash)?) + } else { + let next_block_number: U256 = (*parent.header.number() + One::one()).into(); + + // If the runtime API does not support the core selector API, fallback to some default + // values. 
+ Ok((CoreSelector(next_block_number.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))) + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs deleted file mode 100644 index 9c53da6a6b7d..000000000000 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use futures::{stream::FusedStream, StreamExt}; -use sc_consensus::{BlockImport, StateAction}; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof}; -use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_trie::proof_size_extension::ProofSizeExt; -use std::sync::Arc; - -/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. -/// -/// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is -/// not running as collator. -pub struct SlotBasedBlockImportHandle { - receiver: TracingUnboundedReceiver<(Block, StorageProof)>, -} - -impl SlotBasedBlockImportHandle { - /// Returns the next item. - /// - /// The future will never return when the internal channel is closed. 
- pub async fn next(&mut self) -> (Block, StorageProof) { - loop { - if self.receiver.is_terminated() { - futures::pending!() - } else if let Some(res) = self.receiver.next().await { - return res - } - } - } -} - -/// Special block import for the slot based collator. -pub struct SlotBasedBlockImport { - inner: BI, - client: Arc, - sender: TracingUnboundedSender<(Block, StorageProof)>, -} - -impl SlotBasedBlockImport { - /// Create a new instance. - /// - /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the - /// [`Params`](super::Params), so that this block import instance can communicate with the - /// collation task. If the node is not running as a collator, just dropping the handle is fine. - pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) { - let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); - - (Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver }) - } -} - -impl Clone for SlotBasedBlockImport { - fn clone(&self) -> Self { - Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() } - } -} - -#[async_trait::async_trait] -impl BlockImport for SlotBasedBlockImport -where - Block: BlockT, - BI: BlockImport + Send + Sync, - BI::Error: Into, - Client: ProvideRuntimeApi + CallApiAt + Send + Sync, - Client::StateBackend: Send, - Client::Api: Core, -{ - type Error = sp_consensus::Error; - - async fn check_block( - &self, - block: sc_consensus::BlockCheckParams, - ) -> Result { - self.inner.check_block(block).await.map_err(Into::into) - } - - async fn import_block( - &self, - mut params: sc_consensus::BlockImportParams, - ) -> Result { - // If the channel exists and it is required to execute the block, we will execute the block - // here. This is done to collect the storage proof and to prevent re-execution, we push - // downwards the state changes. 
`StateAction::ApplyChanges` is ignored, because it either - // means that the node produced the block itself or the block was imported via state sync. - if !self.sender.is_closed() && !matches!(params.state_action, StateAction::ApplyChanges(_)) - { - let mut runtime_api = self.client.runtime_api(); - - runtime_api.set_call_context(CallContext::Onchain); - - runtime_api.record_proof(); - let recorder = runtime_api - .proof_recorder() - .expect("Proof recording is enabled in the line above; qed."); - runtime_api.register_extension(ProofSizeExt::new(recorder)); - - let parent_hash = *params.header.parent_hash(); - - let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); - - runtime_api - .execute_block(parent_hash, block.clone()) - .map_err(|e| Box::new(e) as Box<_>)?; - - let storage_proof = - runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); - - let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; - let gen_storage_changes = runtime_api - .into_storage_changes(&state, parent_hash) - .map_err(sp_consensus::Error::ChainLookup)?; - - if params.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(sp_consensus::Error::Other(Box::new( - sp_blockchain::Error::InvalidStateRoot, - ))) - } - - params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( - gen_storage_changes, - )); - - let _ = self.sender.unbounded_send((block, storage_proof)); - } - - self.inner.import_block(params).await.map_err(Into::into) - } -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index abaeb8319a40..5b8151f6302c 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -47,8 +47,6 @@ pub struct Params { pub collator_service: CS, 
/// Receiver channel for communication with the block builder task. pub collator_receiver: TracingUnboundedReceiver>, - /// The handle from the special slot based block import. - pub block_import_handle: super::SlotBasedBlockImportHandle, } /// Asynchronously executes the collation task for a parachain. @@ -57,49 +55,28 @@ pub struct Params { /// collations to the relay chain. It listens for new best relay chain block notifications and /// handles collator messages. If our parachain is scheduled on a core and we have a candidate, /// the task will build a collation and send it to the relay chain. -pub async fn run_collation_task( - Params { - relay_client, - collator_key, - para_id, - reinitialize, - collator_service, - mut collator_receiver, - mut block_import_handle, - }: Params, -) where +pub async fn run_collation_task(mut params: Params) +where Block: BlockT, CS: CollatorServiceInterface + Send + Sync + 'static, RClient: RelayChainInterface + Clone + 'static, { - let Ok(mut overseer_handle) = relay_client.overseer_handle() else { + let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else { tracing::error!(target: LOG_TARGET, "Failed to get overseer handle."); return }; cumulus_client_collator::initialize_collator_subsystems( &mut overseer_handle, - collator_key, - para_id, - reinitialize, + params.collator_key, + params.para_id, + params.reinitialize, ) .await; - loop { - futures::select! { - collator_message = collator_receiver.next() => { - let Some(message) = collator_message else { - return; - }; - - handle_collation_message(message, &collator_service, &mut overseer_handle).await; - }, - block_import_msg = block_import_handle.next().fuse() => { - // TODO: Implement me. 
- // Issue: https://github.com/paritytech/polkadot-sdk/issues/6495 - let _ = block_import_msg; - } - } + let collator_service = params.collator_service; + while let Some(collator_message) = params.collator_receiver.next().await { + handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await; } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index ab78b31fbd80..7453d3c89d08 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -28,42 +28,40 @@ //! during the relay chain block. After the block is built, the block builder task sends it to //! the collation task which compresses it and submits it to the collation-generation subsystem. -use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; use codec::Codec; use consensus_common::ParachainCandidate; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector, GetCoreSelectorApi}; +use cumulus_primitives_core::GetCoreSelectorApi; use cumulus_relay_chain_interface::RelayChainInterface; -use futures::FutureExt; use polkadot_primitives::{ - vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, - ValidationCodeHash, + CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, }; + use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; -use sp_api::{ApiExt, ProvideRuntimeApi}; + +use sp_api::ProvideRuntimeApi; use 
sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_aura::AuraApi; -use sp_core::{crypto::Pair, traits::SpawnNamed, U256}; +use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Member, NumberFor, One}; +use sp_runtime::traits::{Block as BlockT, Member}; + use std::{sync::Arc, time::Duration}; -pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; +use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; mod block_builder_task; -mod block_import; mod collation_task; -mod relay_chain_data_cache; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -95,33 +93,13 @@ pub struct Params, - /// Spawner for spawning futures. - pub spawner: Spawner, } /// Run aura-based block building and collation task. 
-pub fn run( - Params { - create_inherent_data_providers, - block_import, - para_client, - para_backend, - relay_client, - code_hash_provider, - keystore, - collator_key, - para_id, - proposer, - collator_service, - authoring_duration, - reinitialize, - slot_drift, - block_import_handle, - spawner, - }: Params, -) where +pub fn run( + params: Params, +) -> (impl futures::Future, impl futures::Future) +where Block: BlockT, Client: ProvideRuntimeApi + BlockOf @@ -145,50 +123,39 @@ pub fn run> + Member + Codec, - Spawner: SpawnNamed, { let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100); let collator_task_params = collation_task::Params { - relay_client: relay_client.clone(), - collator_key, - para_id, - reinitialize, - collator_service: collator_service.clone(), + relay_client: params.relay_client.clone(), + collator_key: params.collator_key, + para_id: params.para_id, + reinitialize: params.reinitialize, + collator_service: params.collator_service.clone(), collator_receiver: rx, - block_import_handle, }; let collation_task_fut = run_collation_task::(collator_task_params); let block_builder_params = block_builder_task::BuilderTaskParams { - create_inherent_data_providers, - block_import, - para_client, - para_backend, - relay_client, - code_hash_provider, - keystore, - para_id, - proposer, - collator_service, - authoring_duration, + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + para_client: params.para_client, + para_backend: params.para_backend, + relay_client: params.relay_client, + code_hash_provider: params.code_hash_provider, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + authoring_duration: params.authoring_duration, collator_sender: tx, - slot_drift, + slot_drift: params.slot_drift, }; let block_builder_fut = run_block_builder::(block_builder_params); - spawner.spawn_blocking( - 
"slot-based-block-builder", - Some("slot-based-collator"), - block_builder_fut.boxed(), - ); - spawner.spawn_blocking( - "slot-based-collation", - Some("slot-based-collator"), - collation_task_fut.boxed(), - ); + (collation_task_fut, block_builder_fut) } /// Message to be sent from the block builder to the collation task. @@ -206,26 +173,3 @@ struct CollatorMessage { /// Core index that this block should be submitted on pub core_index: CoreIndex, } - -/// Fetch the `CoreSelector` and `ClaimQueueOffset` for `parent_hash`. -fn core_selector( - para_client: &Client, - parent_hash: Block::Hash, - parent_number: NumberFor, -) -> Result<(CoreSelector, ClaimQueueOffset), sp_api::ApiError> -where - Client: ProvideRuntimeApi + Send + Sync, - Client::Api: GetCoreSelectorApi, -{ - let runtime_api = para_client.runtime_api(); - - if runtime_api.has_api::>(parent_hash)? { - Ok(runtime_api.core_selector(parent_hash)?) - } else { - let next_block_number: U256 = (parent_number + One::one()).into(); - - // If the runtime API does not support the core selector API, fallback to some default - // values. - Ok((CoreSelector(next_block_number.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))) - } -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs deleted file mode 100644 index be30ec2f747d..000000000000 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Utility for caching [`RelayChainData`] for different relay blocks. - -use crate::collators::cores_scheduled_for_para; -use cumulus_primitives_core::ClaimQueueOffset; -use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_primitives::{ - CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, OccupiedCoreAssumption, -}; -use sp_runtime::generic::BlockId; -use std::collections::BTreeSet; - -/// Contains relay chain data necessary for parachain block building. -#[derive(Clone)] -pub struct RelayChainData { - /// Current relay chain parent header. - pub relay_parent_header: RelayHeader, - /// The cores on which the para is scheduled at the configured claim queue offset. - pub scheduled_cores: Vec, - /// Maximum configured PoV size on the relay chain. - pub max_pov_size: u32, - /// The claimed cores at a relay parent. - pub claimed_cores: BTreeSet, -} - -/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block -/// hash. -pub struct RelayChainDataCache { - relay_client: RI, - para_id: ParaId, - cached_data: schnellru::LruMap, -} - -impl RelayChainDataCache -where - RI: RelayChainInterface + Clone + 'static, -{ - pub fn new(relay_client: RI, para_id: ParaId) -> Self { - Self { - relay_client, - para_id, - // 50 cached relay chain blocks should be more than enough. - cached_data: schnellru::LruMap::new(schnellru::ByLength::new(50)), - } - } - - /// Fetch required [`RelayChainData`] from the relay chain. - /// If this data has been fetched in the past for the incoming hash, it will reuse - /// cached data. 
- pub async fn get_mut_relay_chain_data( - &mut self, - relay_parent: RelayHash, - claim_queue_offset: ClaimQueueOffset, - ) -> Result<&mut RelayChainData, ()> { - let insert_data = if self.cached_data.peek(&relay_parent).is_some() { - tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); - None - } else { - tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); - Some(self.update_for_relay_parent(relay_parent, claim_queue_offset).await?) - }; - - Ok(self - .cached_data - .get_or_insert(relay_parent, || { - insert_data.expect("`insert_data` exists if not cached yet; qed") - }) - .expect("There is space for at least one element; qed")) - } - - /// Fetch fresh data from the relay chain for the given relay parent hash. - async fn update_for_relay_parent( - &self, - relay_parent: RelayHash, - claim_queue_offset: ClaimQueueOffset, - ) -> Result { - let scheduled_cores = cores_scheduled_for_para( - relay_parent, - self.para_id, - &self.relay_client, - claim_queue_offset, - ) - .await; - - let Ok(Some(relay_parent_header)) = - self.relay_client.header(BlockId::Hash(relay_parent)).await - else { - tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); - return Err(()) - }; - - let max_pov_size = match self - .relay_client - .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) - .await - { - Ok(None) => return Err(()), - Ok(Some(pvd)) => pvd.max_pov_size, - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); - return Err(()) - }, - }; - - Ok(RelayChainData { - relay_parent_header, - scheduled_cores, - max_pov_size, - claimed_cores: BTreeSet::new(), - }) - } -} diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 5bc5160601e7..4bc2f1d1e600 100644 --- 
a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -20,7 +18,6 @@ log = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate -prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } @@ -32,14 +29,15 @@ sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Polkadot polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } schnellru = { workspace = true } [dev-dependencies] diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index e391481bc445..bb760ae03f4d 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git 
a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index fdc343dc65de..f3ee6fc2f7d2 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -18,7 +16,6 @@ parking_lot = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate -prometheus-endpoint = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } @@ -27,6 +24,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus cumulus-client-consensus-common = { workspace = true, default-features = true } diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 11025f8f62e6..bc67678eedeb 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -21,28 +19,28 @@ tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, 
default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } # Polkadot polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } # Cumulus cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] portpicker = { workspace = true } -rstest = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } url = { workspace = true } +rstest = { workspace = true } # Substrate sc-cli = { workspace = true, default-features = true } diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 4f53e2bc1bc2..0d82cf648743 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." 
license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [dependencies] async-trait = { workspace = true } diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs index e08aca932564..950cba2aaa7d 100644 --- a/cumulus/client/parachain-inherent/src/mock.rs +++ b/cumulus/client/parachain-inherent/src/mock.rs @@ -17,17 +17,17 @@ use crate::{ParachainInherentData, INHERENT_IDENTIFIER}; use codec::Decode; use cumulus_primitives_core::{ - relay_chain, relay_chain::UpgradeGoAhead, InboundDownwardMessage, InboundHrmpMessage, ParaId, - PersistedValidationData, + relay_chain, InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData, }; use cumulus_primitives_parachain_inherent::MessageQueueChain; -use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sc_client_api::{Backend, StorageProvider}; use sp_crypto_hashing::twox_128; use sp_inherents::{InherentData, InherentDataProvider}; use sp_runtime::traits::Block; use std::collections::BTreeMap; +use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; + /// Relay chain slot duration, in milliseconds. pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -68,12 +68,10 @@ pub struct MockValidationDataInherentDataProvider { pub xcm_config: MockXcmConfig, /// Inbound downward XCM messages to be injected into the block. pub raw_downward_messages: Vec>, - /// Inbound Horizontal messages sorted by channel. + // Inbound Horizontal messages sorted by channel. pub raw_horizontal_messages: Vec<(ParaId, Vec)>, - /// Additional key-value pairs that should be injected. + // Additional key-value pairs that should be injected. pub additional_key_values: Option, Vec)>>, - /// Whether upgrade go ahead should be set. - pub upgrade_go_ahead: Option, } /// Something that can generate randomness. 
@@ -178,7 +176,6 @@ impl> InherentDataProvider sproof_builder.current_slot = ((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into(); - sproof_builder.upgrade_go_ahead = self.upgrade_go_ahead; // Process the downward messages and set up the correct head let mut downward_messages = Vec::new(); let mut dmq_mqc = MessageQueueChain::new(self.xcm_config.starting_dmq_mqc_head); diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 7e7da7244a86..3127dd26fcaa 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true description = "Parachain PoV recovery" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -21,10 +19,10 @@ tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } # Polkadot @@ -34,19 +32,19 @@ polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } # Cumulus -async-trait = { workspace = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +async-trait = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -cumulus-test-client = { workspace = true } -portpicker = { workspace = true } rstest = { workspace = true } 
-sc-utils = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +portpicker = { workspace = true } sp-blockchain = { workspace = true, default-features = true } +cumulus-test-client = { workspace = true } +sc-utils = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -tokio = { features = ["macros"], workspace = true, default-features = true } +assert_matches = { workspace = true } # Cumulus cumulus-test-service = { workspace = true } diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 2a590bbca562..6f1b74191be7 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,9 +17,9 @@ futures-timer = { workspace = true } # Substrate sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -42,9 +40,9 @@ cumulus-relay-chain-interface = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } # Polkadot -metered = { features = ["futures_channel"], workspace = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-test-client = { workspace = true } +metered = { features = ["futures_channel"], workspace = true } # Cumulus cumulus-test-service = { workspace = true } diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 659d3b0f5b27..a496fab050dd 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" edition.workspace = true description = "Common interface for different relay chain datasources." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,14 +14,14 @@ polkadot-overseer = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sp-version = { workspace = true } -async-trait = { workspace = true } -codec = { workspace = true, default-features = true } futures = { workspace = true } -jsonrpsee-core = { workspace = true } +async-trait = { workspace = true } thiserror = { workspace = true } +jsonrpsee-core = { workspace = true } +codec = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 5b1e30cea9ba..95ecadc8bd06 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -5,45 +5,43 @@ version = "0.7.0" edition.workspace = true description = "Minimal node implementation to be used in tandem with RPC or light-client mode." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] # polkadot deps +polkadot-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-network-bridge = { workspace = true, default-features = true } polkadot-service = { workspace = true, default-features = true } # substrate deps -prometheus-endpoint = { workspace = true, default-features = true } sc-authority-discovery = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } tokio = { 
features = ["macros"], workspace = true, default-features = true } # cumulus deps -cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-relay-chain-rpc-interface = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } array-bytes = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { workspace = true } -tracing = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index f70a73a5d5ce..a3d858ea40c9 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -224,7 +224,7 @@ async fn new_minimal_relay_chain( + let (network, network_starter, sync_service) = build_collator_network::( &config, net_config, task_manager.spawn_handle(), @@ -262,6 +262,8 @@ async fn new_minimal_relay_chain>( genesis_hash: Hash, best_header: Header, notification_metrics: NotificationMetrics, -) -> Result<(Arc, Arc), Error> { +) -> Result< + (Arc, NetworkStarter, Arc), + Error, +> { let protocol_id = config.protocol_id(); let (block_announce_config, _notification_service) = get_block_announce_proto_config::( protocol_id.clone(), @@ -82,6 +85,8 @@ pub(crate) fn build_collator_network>( let network_worker = Network::new(network_params)?; let network_service = network_worker.network_service(); + let (network_start_tx, network_start_rx) = futures::channel::oneshot::channel(); + // The network worker is responsible for gathering all network messages and processing // them. 
This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -89,9 +94,22 @@ pub(crate) fn build_collator_network>( // issue, and ideally we would like to fix the network future to take as little time as // possible, but we also take the extra harm-prevention measure to execute the networking // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", Some("networking"), network_worker.run()); + spawn_handle.spawn_blocking("network-worker", Some("networking"), async move { + if network_start_rx.await.is_err() { + tracing::warn!( + "The NetworkStart returned as part of `build_network` has been silently dropped" + ); + // This `return` might seem unnecessary, but we don't want to make it look like + // everything is working as normal even though the user is clearly misusing the API. + return + } + + network_worker.run().await; + }); + + let network_starter = NetworkStarter::new(network_start_tx); - Ok((network_service, Arc::new(SyncOracle {}))) + Ok((network_service, network_starter, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 50b438e34237..fb4cb4ceed4e 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -5,8 +5,6 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -20,36 +18,36 @@ polkadot-overseer = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-rpc-api = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } tokio = { features = ["sync"], workspace = true, default-features = true } tokio-util = { features = ["compat"], workspace = true } -async-trait = { workspace = true } -codec = { workspace = true, default-features = true } -either = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +codec = { workspace = true, default-features = true } jsonrpsee = { features = 
["ws-client"], workspace = true } -pin-project = { workspace = true } -prometheus = { workspace = true } -rand = { workspace = true, default-features = true } -schnellru = { workspace = true } -serde = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +async-trait = { workspace = true } +url = { workspace = true } serde_json = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +schnellru = { workspace = true } smoldot = { default_features = false, features = ["std"], workspace = true } smoldot-light = { default_features = false, features = ["std"], workspace = true } +either = { workspace = true, default-features = true } thiserror = { workspace = true } -tracing = { workspace = true, default-features = true } -url = { workspace = true } +rand = { workspace = true, default-features = true } +pin-project = { workspace = true } +prometheus = { workspace = true } diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index c88386b985a4..8e9e41ca89dc 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -5,35 +5,32 @@ authors.workspace = true edition.workspace = true description = "Common functions used to assemble the components of a parachain node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] futures = { workspace = true } -futures-timer = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-sync = { workspace = true, default-features = true } -sc-network-transactions = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } # Polkadot polkadot-primitives = { workspace = true, default-features = true } @@ -42,10 +39,10 @@ polkadot-primitives = { workspace = true, default-features = true } cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { 
workspace = true, default-features = true } -cumulus-client-network = { workspace = true, default-features = true } cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-client-network = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } -cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 912109c2ad32..ae83f2ade3f6 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -40,7 +40,7 @@ use sc_consensus::{ use sc_network::{config::SyncMode, service::traits::NetworkService, NetworkBackend}; use sc_network_sync::SyncingService; use sc_network_transactions::TransactionsHandlerController; -use sc_service::{Configuration, SpawnTaskHandle, TaskManager, WarpSyncConfig}; +use sc_service::{Configuration, NetworkStarter, SpawnTaskHandle, TaskManager, WarpSyncConfig}; use sc_telemetry::{log, TelemetryWorkerHandle}; use sc_utils::mpsc::TracingUnboundedSender; use sp_api::ProvideRuntimeApi; @@ -439,6 +439,7 @@ pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>( Arc, TracingUnboundedSender>, TransactionsHandlerController, + NetworkStarter, Arc>, )> where diff --git a/cumulus/docs/release.md b/cumulus/docs/release.md new file mode 100644 index 000000000000..8302b7b9b7fc --- /dev/null +++ b/cumulus/docs/release.md @@ -0,0 +1,135 @@ +# Releases + +## Versioning + +### Example #1 + +``` +| Polkadot | v 0. 9.22 | +| Client | v 0. 
9.22 0 | +| Runtime | v 9 22 0 | => 9220 +| semver | 0. 9.22 0 | +``` + +### Example #2 + +``` +| Polkadot | v 0.10.42 | +| Client | v 0.10.42 0 | +| Runtime | v 10.42 0 | => 10420 +| semver | 0.10.42 0 | +``` + +### Example #3 + +``` +| Polkadot | v 1. 2.18 | +| Client | v 1. 2.18 0 | +| Runtime | v 1 2 18 0 | => 102180 +| semver | 1. 2.18 0 | +``` + + +This document contains information related to the releasing process and describes a few of the steps and checks that are +performed during the release process. + +## Client + +### Burn In + +Ensure that Parity DevOps has run the new release on Westend and Kusama Asset Hub collators for 12h prior to publishing +the release. + +### Build Artifacts + +Add any necessary assets to the release. They should include: + +- Linux binaries + - GPG signature + - SHA256 checksum +- WASM binaries of the runtimes +- Source code + + +## Runtimes + +### Spec Version + +A new runtime release must bump the `spec_version`. This may follow a pattern with the client release (e.g. runtime +v9220 corresponds to v0.9.22). + +### Runtime version bump between RCs + +The clients need to be aware of runtime changes. However, we do not want to bump the `spec_version` for every single +release candidate. Instead, we can bump the `impl` field of the version to signal the change to the client. This applies +only to runtimes that have been deployed. + +### Old Migrations Removed + +Previous `on_runtime_upgrade` functions from old upgrades should be removed. + +### New Migrations + +Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade` +function of the appropriate pallets. + +### Extrinsic Ordering & Storage + +Offline signing libraries depend on a consistent ordering of call indices and functions. Compare the metadata of the +current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. 
It also +checks if there have been any changes in `storage`. In case of a breaking change, increase `transaction_version`. + +To verify the order has not changed, manually start the following +[Github Action](https://github.com/paritytech/polkadot-sdk/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml). +It takes around a minute to run and will produce the report as artifact you need to manually check. + +To run it, in the _Run Workflow_ dropdown: +1. **Use workflow from**: to ignore, leave `master` as default +2. **The WebSocket url of the reference node**: - Asset Hub Polkadot: `wss://statemint-rpc.polkadot.io` + - Asset Hub Kusama: `wss://statemine-rpc.polkadot.io` + - Asset Hub Westend: `wss://westmint-rpc.polkadot.io` +3. **A url to a Linux binary for the node containing the runtime to test**: Paste the URL of the latest + release-candidate binary from the draft-release on Github. The binary has to previously be uploaded to S3 (Github url + link to the binary is constantly changing) + - E.g: https://releases.parity.io/cumulus/v0.9.270-rc3/polkadot-parachain +4. **The name of the chain under test. Usually, you would pass a local chain**: - Asset Hub Polkadot: + `asset-hub-polkadot-local` + - Asset Hub Kusama: `asset-hub-kusama-local` + - Asset Hub Westend: `asset-hub-westend-local` +5. Click **Run workflow** + +When the workflow is done, click on it and download the zip artifact, inside you'll find an `output.txt` file. The +things to look for in the output are lines like: + +- `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for Identity has changed +- `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets. +- If no indices have changed, every modules line should look something like `[Identity] idx 25 (calls 15)` + +**Note**: Adding new functions to the runtime does not constitute a breaking change as long as the indexes did not +change. 
+ +**Note**: Extrinsic function signatures changes (adding/removing & ordering arguments) are not caught by the job, so +those changes should be reviewed "manually" + +### Benchmarks + +The Benchmarks can now be started from the CI. First find the CI pipeline from +[here](https://gitlab.parity.io/parity/mirrors/cumulus/-/pipelines?page=1&scope=all&ref=release-parachains-v9220) and +pick the latest. [Guide](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus) + +### Integration Tests + +Until https://github.com/paritytech/ci_cd/issues/499 is done, tests will have to be run manually. +1. Go to https://github.com/paritytech/parachains-integration-tests and check out the release branch. E.g. +https://github.com/paritytech/parachains-integration-tests/tree/release-v9270-v0.9.27 for `release-parachains-v0.9.270` +2. Clone `release-parachains-` branch from Cumulus +3. `cargo build --release` +4. Copy `./target/polkadot-parachain` to `./bin` +5. Clone `it/release--fast-sudo` from Polkadot In case the branch does not exists (it is a manual process): + cherry pick `paritytech/polkadot@791c8b8` and run: + `find . -type f -name "*.toml" -print0 | xargs -0 sed -i '' -e 's/polkadot-vX.X.X/polkadot-v/g'` +6. `cargo build --release --features fast-runtime` +7. Copy `./target/polkadot` into `./bin` (in Cumulus) +8. 
Run the tests: + - Asset Hub Polkadot: `yarn zombienet-test -c ./examples/statemint/config.toml -t ./examples/statemint` + - Asset Hub Kusama: `yarn zombienet-test -c ./examples/statemine/config.toml -t ./examples/statemine` diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index fcda79f1d5c1..c08148928b7c 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 651cceebbc6e..8d67db3daf8b 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -16,29 +16,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive"], workspace = true } log = { workspace = true } +codec = { features = ["derive"], workspace = true } rand = { features = ["std_rng"], workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-session = { workspace = true } -sp-runtime = { workspace = true } -sp-staking = { workspace = true } frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -pallet-aura = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-runtime = { workspace = true, 
default-features = true } sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +pallet-aura = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 4f5bbc97bfc2..936526290d93 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -21,8 +21,8 @@ scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-io = { workspace = true } # Polkadot xcm = { workspace = true } @@ -56,7 +56,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 6b6bc4fbcefe..3cb0394c4b95 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -17,8 +15,8 @@ codec = { features = ["derive"], workspace = true } environmental = { workspace = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } trie-db = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -38,6 +36,7 @@ 
sp-version = { workspace = true } # Polkadot polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } polkadot-runtime-parachains = { workspace = true } +polkadot-runtime-common = { optional = true, workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -49,18 +48,18 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -futures = { workspace = true } hex-literal = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } trie-standardmap = { workspace = true } +rand = { workspace = true, default-features = true } +futures = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } -sp-consensus-slots = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } # Cumulus cumulus-test-client = { workspace = true } @@ -83,6 +82,7 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", @@ -107,16 +107,17 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", + 
"polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index d4485a400cb8..da6f0fd03efb 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -15,10 +13,10 @@ workspace = true proc-macro = true [dependencies] -proc-macro-crate = { workspace = true } +syn = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } -syn = { workspace = true } +proc-macro-crate = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 0fa759357f65..39fc8321a072 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1636,7 +1636,7 @@ impl InspectMessageQueues for Pallet { } #[cfg(feature = "runtime-benchmarks")] -impl polkadot_runtime_parachains::EnsureForParachain for Pallet { +impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet { fn ensure(para_id: ParaId) { if let ChannelStatus::Closed = Self::get_channel_status(para_id) { Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 36efd3decf77..035541fb17b1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -85,10 +85,7 @@ impl CacheProvider { } impl TrieCacheProvider for CacheProvider 
{ - type Cache<'a> - = TrieCache<'a, H> - where - H: 'a; + type Cache<'a> = TrieCache<'a, H> where H: 'a; fn as_trie_db_cache(&self, storage_root: ::Out) -> Self::Cache<'_> { TrieCache { diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 8dc2f20dd390..4a478d047f1b 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -115,10 +115,7 @@ impl SizeOnlyRecorderProvider { } impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { - type Recorder<'a> - = SizeOnlyRecorder<'a, H> - where - H: 'a; + type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; fn drain_storage_proof(self) -> Option { None diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 6d77e567c9b6..5af94434e0af 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +sp-runtime = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } pallet-session = { workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 2088361bf11a..5fd1939e93a0 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git 
a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 25938763c956..35d7a083b061 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -5,8 +5,6 @@ name = "cumulus-pallet-xcm" version = "0.7.0" license = "Apache-2.0" description = "Pallet for stuff specific to parachains' usage of XCM" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -15,10 +13,10 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } xcm = { workspace = true } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 43dfae8927d2..9c7470eda6da 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Pallet to queue outbound and inbound XCMP messages." 
license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,24 +17,24 @@ scale-info = { features = ["derive"], workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } -pallet-message-queue = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } +pallet-message-queue = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } # Optional import for benchmarking -bounded-collections = { workspace = true } frame-benchmarking = { optional = true, workspace = true } +bounded-collections = { workspace = true } # Bridges bp-xcm-bridge-hub-router = { optional = true, workspace = true } @@ -44,9 +42,9 @@ bp-xcm-bridge-hub-router = { optional = true, workspace = true } [dev-dependencies] # Substrate -frame-support = { features = ["experimental"], workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true, default-features = true } @@ -87,7 +85,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 
ae4409e4f44f..58b8ac019227 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -28,8 +28,7 @@ "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", - "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3", - "/dns/asset-hub-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWNCg821LyWDVrAJ2mG6ScDeeBFuDPiJtLYc9jCGNCyMoq" + "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 62efb924c171..3e46501b0078 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -28,8 +28,7 @@ "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", - "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J", - "/dns/asset-hub-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJUhizuk3crSvpyKLGycHBtnP93rwjksVueveU6x6k6RY" + "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" ], "telemetryEndpoints": null, "protocolId": null, diff --git 
a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 67a208c2787b..42717974a0b3 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -29,8 +29,7 @@ "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", - "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", - "/dns/asset-hub-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWDUPyF2q8b6fVFEuwxBbRV3coAy1kzuCPU3D9TRiLnUfE" + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 83910965584f..36558b325bbf 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -28,8 +28,7 @@ "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", - "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd", - "/dns/bridge-hub-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWBE1ZhrYqMC3ECFK6qbufS9kgKuF57XpvvZU6LKsPUSnF" + 
"/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 30585efaf4f1..eb22e09035f3 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -28,8 +28,7 @@ "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH", "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", - "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", - "/dns/bridge-hub-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWSBpo6fYU8CUr4fwA14CKSDUSj5jSgZzQDBNL1B8Dnmaw" + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 05d679a3e23f..40c7c7460c23 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -29,8 +29,7 @@ "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", - 
"/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", - "/dns/bridge-hub-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJEfDZxrEKehoPbW2Mfg6rypttMXCMgMiybmapKqcByc1" + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 458530baf336..5ccccbec9053 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -27,8 +27,7 @@ "/dns/pch16.rotko.net/tcp/35576/wss/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", - "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w", - "/dns/collectives-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWNscpobBzjPEdjbbjjKRYh9j1whYJvagRJwb9UH68zCPC" + "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index aa0204df1a06..f583eddcef1f 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -29,8 +29,7 @@ "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", 
"/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", - "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", - "/dns/collectives-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWFH7UZnWESzuRSgrLvNSfALjtpr9PmG7QGyRNCizWEHcd" + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-kusama.json b/cumulus/parachains/chain-specs/coretime-kusama.json index 8352588a1e4b..3e4ffae403bd 100644 --- a/cumulus/parachains/chain-specs/coretime-kusama.json +++ b/cumulus/parachains/chain-specs/coretime-kusama.json @@ -26,8 +26,7 @@ "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", "/dns/coretime-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", - "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", - "/dns/coretime-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWMPc6jEjzFLRCK7QgbcNh3gvxCzGvDKhU4F66QWf2kZmq" + "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-polkadot.json b/cumulus/parachains/chain-specs/coretime-polkadot.json index 7c12ee155b41..e4f947d2afc9 100644 --- a/cumulus/parachains/chain-specs/coretime-polkadot.json +++ b/cumulus/parachains/chain-specs/coretime-polkadot.json @@ -12,8 +12,7 @@ "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9", 
"/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/30361/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9", "/dns/coretime-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFsQphSqvqjVyKcEdR1D7LPcXHqjmy6ASuJrTr5isk9JU", - "/dns/coretime-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFsQphSqvqjVyKcEdR1D7LPcXHqjmy6ASuJrTr5isk9JU", - "/dns/coretime-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWFG9WQQTf3MX3YQypZjJtoJM5zCQgJcqYdxxTStsbhZGU" + "/dns/coretime-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFsQphSqvqjVyKcEdR1D7LPcXHqjmy6ASuJrTr5isk9JU" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index de6923bd7669..42f67526c29a 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -30,8 +30,7 @@ "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30358/wss/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", "/dns/coretime-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", - "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", - "/dns/coretime-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWCFNzjaiq45ZpW2qStmQdG5w7ZHrmi3RWUeG8cV2pPc2Y" + "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 701e6e7dc1ec..300b9fcfb183 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -28,8 +28,7 @@ 
"/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/30342/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/443/wss/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", "/dns4/people-kusama.boot.stake.plus/tcp/30332/wss/p2p/12D3KooWRuKr3ogzXwD8zE2CTWenGdy8vSfViAjYMwGiwvFCsz8n", - "/dns/people-kusama.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWFkDKdFxBJFyj9zumuJ4Mmctec2GqdYHcKYq8MTVe8dxf", - "/dns/people-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWN32MmhPgZN8e1Dmc8DzEUKsfC2hga3Lqekko4VWvrbhq" + "/dns/people-kusama.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWFkDKdFxBJFyj9zumuJ4Mmctec2GqdYHcKYq8MTVe8dxf" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-polkadot.json b/cumulus/parachains/chain-specs/people-polkadot.json index ff8d57b9284d..083c0fbf44a4 100644 --- a/cumulus/parachains/chain-specs/people-polkadot.json +++ b/cumulus/parachains/chain-specs/people-polkadot.json @@ -8,8 +8,7 @@ "/dns/polkadot-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWP7BoJ7nAF9QnsreN8Eft1yHNUhvhxFiQyKFEUePi9mu3", "/dns/polkadot-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWSSfWY3fTGJvGkuNUNBSNVCdLLNJnwkZSNQt7GCRYXu4o", "/dns/people-polkadot-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWKMYu1L28TkDf1ooMW8D8PHcztLnjV3bausH9eiVTRUYN", - "/dns/people-polkadot-boot-ng.dwellir.com/tcp/30346/p2p/12D3KooWKMYu1L28TkDf1ooMW8D8PHcztLnjV3bausH9eiVTRUYN", - "/dns/people-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWDf2aLDKHQyLkDzdEGs6exNzWWw62s2EK9g1wrujJzRZt" + "/dns/people-polkadot-boot-ng.dwellir.com/tcp/30346/p2p/12D3KooWKMYu1L28TkDf1ooMW8D8PHcztLnjV3bausH9eiVTRUYN" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index e52d7b299e1d..ac24b2e64359 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ 
b/cumulus/parachains/chain-specs/people-westend.json @@ -28,8 +28,7 @@ "/dns/wppl16.rotko.net/tcp/33766/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/wppl16.rotko.net/tcp/35766/wss/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/people-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", - "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", - "/dns/people-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJzL4R3kq9Ms88gsV6bS9zGT8DHySdqwau5SHNqTzToNM" + "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 6c52c3201c71..6d436bdf799a 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -39,9 +37,9 @@ xcm = { workspace = true } xcm-executor = { workspace = true } # Cumulus +pallet-collator-selection = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-utility = { workspace = true } -pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } [dev-dependencies] @@ -92,5 +90,4 @@ runtime-benchmarks = [ "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index a164a8197f72..25796e7d64b4 100644 --- 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } sp-keyring = { workspace = true } +frame-support = { workspace = true } # Cumulus -asset-hub-rococo-runtime = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } rococo-emulated-chain = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index c67b94d0db73..8e423ebbf9c2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -13,17 +13,17 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } sp-keyring = { workspace = true } +frame-support = { workspace = true } # Cumulus -asset-hub-westend-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } -testnet-parachains-constants = { features = ["westend"], workspace = true, 
default-features = true } +asset-hub-westend-runtime = { workspace = true } westend-emulated-chain = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } # Polkadot xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 8b16d8ac27ae..231265085eda 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -13,9 +13,9 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } sp-keyring = { workspace = true } +frame-support = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } @@ -24,8 +24,8 @@ xcm = { workspace = true } bp-messages = { workspace = true } # Cumulus -bridge-hub-common = { workspace = true } -bridge-hub-rococo-runtime = { workspace = true, default-features = true } -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index 292b5bd3e434..8292e132809c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ 
b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -13,9 +13,9 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } sp-keyring = { workspace = true } +frame-support = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } @@ -24,8 +24,8 @@ xcm = { workspace = true } bp-messages = { workspace = true } # Cumulus -bridge-hub-common = { workspace = true } -bridge-hub-westend-runtime = { workspace = true, default-features = true } -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 55e3ad6743ed..87dfd73ab05b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -collectives-westend-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } +collectives-westend-runtime = { workspace = true } 
testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml index 8f12dc675199..94d43c5eee2f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -coretime-rococo-runtime = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } +coretime-rococo-runtime = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml index fad1000ac66c..2640c27d016b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -coretime-westend-runtime = { workspace = true, default-features = true } +parachains-common = { workspace = true, 
default-features = true } cumulus-primitives-core = { workspace = true } +coretime-westend-runtime = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index c98e8629e31d..1549d6a2ab6b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } people-rococo-runtime = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 598ba5488f85..9c5ac0bca9de 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } +frame-support = { 
workspace = true } # Cumulus +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } people-westend-runtime = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 7e92e3bf9448..743cd7dc54a2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] # Substrate -frame-support = { workspace = true } sp-core = { workspace = true } sp-keyring = { workspace = true } +frame-support = { workspace = true } # Polkadot xcm = { workspace = true } # Cumulus +parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } penpal-runtime = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index ccf3854e67d8..6db1263df8c7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -13,18 +13,18 @@ workspace = true [dependencies] # Substrate -sc-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } sp-authority-discovery = { workspace = true } sp-consensus-babe = { 
workspace = true } sp-consensus-beefy = { workspace = true, default-features = true } -sp-core = { workspace = true } -sp-keyring = { workspace = true } +sc-consensus-grandpa = { workspace = true } # Polkadot polkadot-primitives = { workspace = true } -rococo-runtime = { workspace = true } rococo-runtime-constants = { workspace = true } +rococo-runtime = { workspace = true } # Cumulus -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index 9b980d7d39cc..de285d9885a2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -13,21 +13,21 @@ workspace = true [dependencies] # Substrate -pallet-staking = { workspace = true } -sc-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } sp-authority-discovery = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true, default-features = true } -sp-core = { workspace = true } -sp-runtime = { workspace = true } +sc-consensus-grandpa = { workspace = true } +pallet-staking = { workspace = true } # Polkadot polkadot-primitives = { workspace = true } -westend-runtime = { workspace = true } westend-runtime-constants = { workspace = true } +westend-runtime = { workspace = true } xcm = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml 
b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index e921deb9c628..23edaf6bfe65 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,36 +14,36 @@ codec = { workspace = true } paste = { workspace = true, default-features = true } # Substrate -frame-support = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Polkadot -pallet-xcm = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } 
+polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } # Cumulus -asset-test-utils = { workspace = true, default-features = true } -cumulus-pallet-parachain-system = { workspace = true, default-features = true } -cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } -cumulus-primitives-core = { workspace = true, default-features = true } parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } xcm-emulator = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } # Bridges bp-messages = { workspace = true, default-features = true } bp-xcm-bridge-hub = { workspace = true, default-features = true } -bridge-runtime-common = { workspace = true, default-features = true } pallet-bridge-messages = { workspace = true, default-features = true } pallet-xcm-bridge-hub = { workspace = true, default-features = true } +bridge-runtime-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 9dad323aa19c..c0d42cf2758e 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -370,8 +370,6 @@ macro_rules! 
impl_send_transact_helpers_for_relay_chain { let destination: $crate::impls::Location = ::child_location_of(recipient); let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); - $crate::impls::dmp::Pallet::<::Runtime>::make_parachain_reachable(recipient); - // Send XCM `Transact` $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index cd2b41e5198f..b776cafb2545 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -23,7 +23,6 @@ pub use pallet_message_queue; pub use pallet_xcm; // Polkadot -pub use polkadot_runtime_parachains::dmp::Pallet as Dmp; pub use xcm::{ prelude::{ AccountId32, All, Asset, AssetId, BuyExecution, DepositAsset, ExpectTransactStatus, @@ -157,8 +156,6 @@ macro_rules! test_relay_is_trusted_teleporter { // Send XCM message from Relay <$sender_relay>::execute_with(|| { - $crate::macros::Dmp::<<$sender_relay as $crate::macros::Chain>::Runtime>::make_parachain_reachable(<$receiver_para>::para_id()); - assert_ok!(<$sender_relay as [<$sender_relay Pallet>]>::XcmPallet::limited_teleport_assets( origin.clone(), bx!(para_destination.clone().into()), diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index 380f4983ad98..9125c976525e 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -31,7 +31,7 @@ pub fn xcm_transact_paid_execution( VersionedXcm::from(Xcm(vec![ WithdrawAsset(fees.clone().into()), BuyExecution { fees, weight_limit }, - Transact { origin_kind, call, fallback_max_weight: None }, + Transact { origin_kind, call }, 
RefundSurplus, DepositAsset { assets: All.into(), @@ -53,7 +53,7 @@ pub fn xcm_transact_unpaid_execution( VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, - Transact { origin_kind, call, fallback_max_weight: None }, + Transact { origin_kind, call }, ])) } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml index 2f8889e48162..864f3c6edd7e 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -12,10 +12,10 @@ workspace = true [dependencies] # Cumulus +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } asset-hub-rococo-emulated-chain = { workspace = true } bridge-hub-rococo-emulated-chain = { workspace = true } -coretime-rococo-emulated-chain = { workspace = true } -emulated-integration-tests-common = { workspace = true } -penpal-emulated-chain = { workspace = true } people-rococo-emulated-chain = { workspace = true } -rococo-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } +coretime-rococo-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 1b789b21c7df..cd0cb272b7f5 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } +westend-emulated-chain = { workspace = true, default-features = true } asset-hub-rococo-emulated-chain = { 
workspace = true } asset-hub-westend-emulated-chain = { workspace = true } bridge-hub-rococo-emulated-chain = { workspace = true } bridge-hub-westend-emulated-chain = { workspace = true } -emulated-integration-tests-common = { workspace = true } penpal-emulated-chain = { workspace = true } -rococo-emulated-chain = { workspace = true } -westend-emulated-chain = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml index 50e75a6bdd74..cec2e3733b2a 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus +emulated-integration-tests-common = { workspace = true } +westend-emulated-chain = { workspace = true } asset-hub-westend-emulated-chain = { workspace = true } bridge-hub-westend-emulated-chain = { workspace = true } collectives-westend-emulated-chain = { workspace = true } -coretime-westend-emulated-chain = { workspace = true } -emulated-integration-tests-common = { workspace = true } penpal-emulated-chain = { workspace = true } people-westend-emulated-chain = { workspace = true } -westend-emulated-chain = { workspace = true } +coretime-westend-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 9e8b8f2a52d7..3d40db6b03ab 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,31 +11,31 @@ publish = false workspace = true [dependencies] -assert_matches = { workspace = true } codec = { workspace = 
true } +assert_matches = { workspace = true } # Substrate +sp-runtime = { workspace = true } +sp-core = { workspace = true } frame-support = { workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-assets = { workspace = true } pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-message-queue = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } -sp-core = { workspace = true } -sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } -polkadot-runtime-common = { workspace = true, default-features = true } -rococo-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } +pallet-xcm = { workspace = true } xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true, default-features = true } +polkadot-runtime-common = { workspace = true, default-features = true } +rococo-runtime-constants = { workspace = true, default-features = true } # Cumulus asset-test-utils = { workspace = true, default-features = true } cumulus-pallet-parachain-system = { workspace = true } -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index fb95c361f089..baec7d20f415 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing 
permissions and // limitations under the License. -use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; - use super::reserve_transfer::*; use crate::{ imports::*, @@ -779,8 +777,6 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { xcm: xcm_on_final_dest, }]); - Dmp::make_parachain_reachable(AssetHubRococo::para_id()); - // First leg is a teleport, from there a local-reserve-transfer to final dest ::XcmPallet::transfer_assets_using_type_and_then( t.signed_origin, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 407a581afeb9..698ef2c9e792 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -14,7 +14,6 @@ // limitations under the License. use crate::imports::*; -use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; fn relay_to_para_sender_assertions(t: RelayToParaTest) { @@ -116,7 +115,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); @@ -275,7 +274,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { t.args.dest.clone() ), }, - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. 
} ) => {}, @@ -306,7 +305,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -488,11 +487,6 @@ pub fn para_to_para_through_hop_receiver_assertions(t: Test DispatchResult { - let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { - unimplemented!("Destination is not a parachain?") - }; - - Dmp::make_parachain_reachable(para_id); ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -552,13 +546,6 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> Dispa fn para_to_para_through_relay_limited_reserve_transfer_assets( t: ParaToParaThroughRelayTest, ) -> DispatchResult { - let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { - unimplemented!("Destination is not a parachain?") - }; - - Rococo::ext_wrapper(|| { - Dmp::make_parachain_reachable(para_id); - }); ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index 8648c8ce9311..69111d38bcac 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -29,7 +29,6 @@ use frame_support::{ use parachains_common::AccountId; use polkadot_runtime_common::impls::VersionedLocatableAsset; use rococo_runtime_constants::currency::GRAND; -use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use xcm_executor::traits::ConvertLocation; // Fund 
Treasury account on Asset Hub from Treasury account on Relay Chain with ROCs. @@ -65,7 +64,6 @@ fn spend_roc_on_asset_hub() { treasury_balance * 2, )); - Dmp::make_parachain_reachable(1000); let native_asset = Location::here(); let asset_hub_location: Location = [Parachain(1000)].into(); let treasury_location: Location = (Parent, PalletInstance(18)).into(); @@ -201,8 +199,6 @@ fn create_and_claim_treasury_spend_in_usdt() { // create a conversion rate from `asset_kind` to the native currency. assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); - Dmp::make_parachain_reachable(1000); - // create and approve a treasury spend. assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 5cd00c239e60..71e44e5cee7d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,36 +11,35 @@ publish = false workspace = true [dependencies] -assert_matches = { workspace = true } codec = { workspace = true } +assert_matches = { workspace = true } # Substrate +sp-runtime = { workspace = true } +sp-core = { workspace = true } frame-metadata-hash-extension = { workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-asset-tx-payment = { workspace = true } -pallet-assets = { workspace = true } pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-treasury = { workspace = true } pallet-message-queue = { workspace = true } pallet-transaction-payment = { workspace = true } -pallet-treasury = { workspace = true } -sp-core = { workspace = true } -sp-runtime = 
{ workspace = true } +pallet-asset-tx-payment = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } polkadot-runtime-common = { workspace = true, default-features = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus +parachains-common = { workspace = true, default-features = true } asset-test-utils = { workspace = true, default-features = true } -assets-common = { workspace = true } -cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } emulated-integration-tests-common = { workspace = true } -parachains-common = { workspace = true, default-features = true } westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 36630e2d2221..3cca99fbfe5c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -106,7 +106,6 @@ mod imports { pub type ParaToParaThroughRelayTest = Test; pub type ParaToParaThroughAHTest = Test; pub type RelayToParaThroughAHTest = Test; - pub type PenpalToRelayThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs index a7f52eb7e09d..90af907654f9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs 
@@ -17,9 +17,7 @@ use crate::imports::*; -use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApiV2; use emulated_integration_tests_common::test_chain_can_claim_assets; -use frame_support::traits::fungible::Mutate; use xcm_executor::traits::DropAssets; #[test] @@ -35,83 +33,3 @@ fn assets_can_be_claimed() { amount ); } - -#[test] -fn chain_can_claim_assets_for_its_users() { - // Many Penpal users have assets trapped in AssetHubWestend. - let beneficiaries: Vec<(Location, Assets)> = vec![ - // Some WND. - ( - Location::new(1, [Parachain(2000), AccountId32 { id: [0u8; 32], network: None }]), - (Parent, 10_000_000_000_000u128).into(), - ), - // Some USDT. - ( - Location::new(1, [Parachain(2000), AccountId32 { id: [1u8; 32], network: None }]), - ([PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())], 100_000_000u128) - .into(), - ), - ]; - - // Start with those assets trapped. - AssetHubWestend::execute_with(|| { - for (location, assets) in &beneficiaries { - ::PolkadotXcm::drop_assets( - location, - assets.clone().into(), - &XcmContext { origin: None, message_id: [0u8; 32], topic: None }, - ); - } - }); - - let penpal_to_asset_hub = PenpalA::sibling_location_of(AssetHubWestend::para_id()); - let mut builder = Xcm::<()>::builder() - .withdraw_asset((Parent, 1_000_000_000_000u128)) - .pay_fees((Parent, 100_000_000_000u128)); - - // Loop through all beneficiaries. - for (location, assets) in &beneficiaries { - builder = builder.execute_with_origin( - // We take only the last part, the `AccountId32` junction. - Some((*location.interior().last().unwrap()).into()), - Xcm::<()>::builder_unsafe() - .claim_asset(assets.clone(), Location::new(0, [GeneralIndex(5)])) // Means lost assets were version 5. - .deposit_asset(assets.clone(), location.clone()) - .build(), - ) - } - - // Finish assembling the message. - let message = builder.build(); - - // Fund PenpalA's sovereign account on AssetHubWestend so it can pay for fees. 
- AssetHubWestend::execute_with(|| { - let penpal_as_seen_by_asset_hub = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let penpal_sov_account_on_asset_hub = - AssetHubWestend::sovereign_account_id_of(penpal_as_seen_by_asset_hub); - type Balances = ::Balances; - assert_ok!(>::mint_into( - &penpal_sov_account_on_asset_hub, - 2_000_000_000_000u128, - )); - }); - - // We can send a message from Penpal root that claims all those assets for each beneficiary. - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( - ::RuntimeOrigin::root(), - bx!(penpal_to_asset_hub.into()), - bx!(VersionedXcm::from(message)), - )); - }); - - // We assert beneficiaries have received their funds. - AssetHubWestend::execute_with(|| { - for (location, expected_assets) in &beneficiaries { - let sov_account = AssetHubWestend::sovereign_account_id_of(location.clone()); - let actual_assets = - ::Runtime::query_account_balances(sov_account).unwrap(); - assert_eq!(VersionedAssets::from(expected_assets.clone()), actual_assets); - } - }); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index 91ebdda16828..a0fc82fba6ef 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; - use super::reserve_transfer::*; use crate::{ imports::*, @@ -660,13 +658,13 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici } // =============================================================== -// ====== Transfer - Native Asset - Relay->AssetHub->Penpal ====== +// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ==== // =============================================================== -/// Transfers of native asset Relay to Penpal (using AssetHub reserve). Parachains want to avoid +/// Transfers of native asset Relay to Parachain (using AssetHub reserve). Parachains want to avoid /// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their /// Sovereign Account on Asset Hub. #[test] -fn transfer_native_asset_from_relay_to_penpal_through_asset_hub() { +fn transfer_native_asset_from_relay_to_para_through_asset_hub() { // Init values for Relay let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); @@ -780,8 +778,6 @@ fn transfer_native_asset_from_relay_to_penpal_through_asset_hub() { xcm: xcm_on_final_dest, }]); - Dmp::make_parachain_reachable(AssetHubWestend::para_id()); - // First leg is a teleport, from there a local-reserve-transfer to final dest ::XcmPallet::transfer_assets_using_type_and_then( t.signed_origin, @@ -824,137 +820,6 @@ fn transfer_native_asset_from_relay_to_penpal_through_asset_hub() { assert!(receiver_assets_after < receiver_assets_before + amount_to_send); } -// =============================================================== -// ===== Transfer - Native Asset - Penpal->AssetHub->Relay ======= -// =============================================================== -/// Transfers of native asset Penpal to Relay (using AssetHub reserve). 
Parachains want to avoid -/// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their -/// Sovereign Account on Asset Hub. -#[test] -fn transfer_native_asset_from_penpal_to_relay_through_asset_hub() { - // Init values for Penpal - let destination = RelayLocation::get(); - let sender = PenpalASender::get(); - let amount_to_send: Balance = WESTEND_ED * 100; - - // Init values for Penpal - let relay_native_asset_location = RelayLocation::get(); - let receiver = WestendReceiver::get(); - - // Init Test - let test_args = TestContext { - sender: sender.clone(), - receiver: receiver.clone(), - args: TestArgs::new_para( - destination.clone(), - receiver.clone(), - amount_to_send, - (Parent, amount_to_send).into(), - None, - 0, - ), - }; - let mut test = PenpalToRelayThroughAHTest::new(test_args); - - let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalA::para_id()), - ); - // fund Penpal's sender account - PenpalA::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - relay_native_asset_location.clone(), - sender.clone(), - amount_to_send * 2, - ); - // fund Penpal's SA on AssetHub with the assets held in reserve - AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ah.clone().into(), amount_to_send * 2)]); - - // prefund Relay checking account so we accept teleport "back" from AssetHub - let check_account = - Westend::execute_with(|| ::XcmPallet::check_account()); - Westend::fund_accounts(vec![(check_account, amount_to_send)]); - - // Query initial balances - let sender_balance_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &sender) - }); - let sov_penpal_on_ah_before = AssetHubWestend::execute_with(|| { - ::Balances::free_balance(sov_penpal_on_ah.clone()) - }); - let receiver_balance_before = Westend::execute_with(|| { - ::Balances::free_balance(receiver.clone()) - }); - - fn 
transfer_assets_dispatchable(t: PenpalToRelayThroughAHTest) -> DispatchResult { - let fee_idx = t.args.fee_asset_item as usize; - let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - let asset_hub_location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); - let context = PenpalUniversalLocation::get(); - - // reanchor fees to the view of destination (Westend Relay) - let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); - if let Fungible(ref mut amount) = remote_fees.fun { - // we already spent some fees along the way, just use half of what we started with - *amount = *amount / 2; - } - let xcm_on_final_dest = Xcm::<()>(vec![ - BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, - DepositAsset { - assets: Wild(AllCounted(t.args.assets.len() as u32)), - beneficiary: t.args.beneficiary, - }, - ]); - - // reanchor final dest (Westend Relay) to the view of hop (Asset Hub) - let mut dest = t.args.dest.clone(); - dest.reanchor(&asset_hub_location, &context).unwrap(); - // on Asset Hub - let xcm_on_hop = Xcm::<()>(vec![InitiateTeleport { - assets: Wild(AllCounted(t.args.assets.len() as u32)), - dest, - xcm: xcm_on_final_dest, - }]); - - // First leg is a reserve-withdraw, from there a teleport to final dest - ::PolkadotXcm::transfer_assets_using_type_and_then( - t.signed_origin, - bx!(asset_hub_location.into()), - bx!(t.args.assets.into()), - bx!(TransferType::DestinationReserve), - bx!(fee.id.into()), - bx!(TransferType::DestinationReserve), - bx!(VersionedXcm::from(xcm_on_hop)), - t.args.weight_limit, - ) - } - test.set_dispatchable::(transfer_assets_dispatchable); - test.assert(); - - // Query final balances - let sender_balance_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &sender) - }); - let sov_penpal_on_ah_after = AssetHubWestend::execute_with(|| { - ::Balances::free_balance(sov_penpal_on_ah.clone()) - }); 
- let receiver_balance_after = Westend::execute_with(|| { - ::Balances::free_balance(receiver.clone()) - }); - - // Sender's asset balance is reduced by amount sent plus delivery fees - assert!(sender_balance_after < sender_balance_before - amount_to_send); - // SA on AH balance is decreased by `amount_to_send` - assert_eq!(sov_penpal_on_ah_after, sov_penpal_on_ah_before - amount_to_send); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); - // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; - // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but - // should be non-zero - assert!(receiver_balance_after < receiver_balance_before + amount_to_send); -} - // ============================================================================================== // ==== Bidirectional Transfer - Native + Teleportable Foreign Assets - Parachain<->AssetHub ==== // ============================================================================================== @@ -974,7 +839,7 @@ fn bidirectional_transfer_multiple_assets_between_penpal_and_asset_hub() { // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover delivery fees for); we + // reserved for fees (there are no further hops to cover transport fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, @@ -1010,7 +875,7 @@ fn bidirectional_transfer_multiple_assets_between_penpal_and_asset_hub() { // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover delivery fees for); we + // reserved for fees (there are no further hops to cover transport fees 
for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index dc36fed42932..558eab13e5c7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -15,7 +15,6 @@ use crate::{create_pool_with_wnd_on, foreign_balance_on, imports::*}; use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; -use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -116,7 +115,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); @@ -275,7 +274,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { t.args.dest.clone() ), }, - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -306,7 +305,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, - // Delivery fees are paid + // Transport fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. 
} ) => {}, @@ -488,11 +487,6 @@ pub fn para_to_para_through_hop_receiver_assertions(t: Test DispatchResult { - let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { - unimplemented!("Destination is not a parachain?") - }; - - Dmp::make_parachain_reachable(para_id); ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -539,13 +533,6 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> Dispa fn para_to_para_through_relay_limited_reserve_transfer_assets( t: ParaToParaThroughRelayTest, ) -> DispatchResult { - let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { - unimplemented!("Destination is not a parachain?") - }; - - Westend::ext_wrapper(|| { - Dmp::make_parachain_reachable(para_id); - }); ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs index bc00106b47c1..544b05360521 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs @@ -44,7 +44,7 @@ fn test_set_asset_claimer_within_a_chain() { type RuntimeCall = ::RuntimeCall; let asset_trap_xcm = Xcm::::builder_unsafe() - .set_hints(vec![AssetClaimer { location: bob_location.clone() }]) + .set_asset_claimer(bob_location.clone()) .withdraw_asset(assets.clone()) .clear_origin() .build(); @@ -116,7 +116,7 @@ fn test_set_asset_claimer_between_the_chains() { let assets: Assets = (Parent, trap_amount).into(); type RuntimeCall = ::RuntimeCall; let trap_xcm = Xcm::::builder_unsafe() - .set_hints(vec![AssetClaimer { location: alice_bh_sibling.clone() }]) + 
.set_asset_claimer(alice_bh_sibling.clone()) .withdraw_asset(assets.clone()) .clear_origin() .build(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs index 7e881a332a53..3c53cfb261be 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs @@ -43,10 +43,10 @@ fn transfer_and_transact_in_same_xcm( // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ - Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, + Transact { origin_kind: OriginKind::Xcm, call }, ExpectTransactStatus(MaybeErrorCode::Success), // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover delivery fees for); we + // reserved for fees (there are no further hops to cover transport fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index 3b53557fc05c..c303e6411d33 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -20,7 +20,6 @@ use emulated_integration_tests_common::{ }; use frame_support::traits::fungibles::{Inspect, Mutate}; use polkadot_runtime_common::impls::VersionedLocatableAsset; -use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; #[test] @@ 
-59,8 +58,6 @@ fn create_and_claim_treasury_spend() { // create a conversion rate from `asset_kind` to the native currency. assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); - Dmp::make_parachain_reachable(1000); - // create and approve a treasury spend. assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 7bb7277df45c..9f6fe78a33ee 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -12,21 +12,21 @@ workspace = true [dependencies] codec = { workspace = true } -hex-literal = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } # Substrate +sp-core = { workspace = true } frame-support = { workspace = true } -pallet-asset-conversion = { workspace = true } pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-balances = { workspace = true } pallet-message-queue = { workspace = true, default-features = true } -sp-core = { workspace = true } sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } xcm = { workspace = true } +pallet-xcm = { workspace = true } xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } @@ -44,7 +44,7 @@ testnet-parachains-constants = { features = ["rococo", "westend"], workspace = t # Snowbridge snowbridge-core = { workspace = true } -snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } -snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-pallet-system = { workspace = true } snowbridge-router-primitives = { workspace = true } 
+snowbridge-pallet-system = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index a2a61660afff..33ab1e70b97b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -16,7 +16,7 @@ use crate::tests::*; fn send_assets_over_bridge(send_fn: F) { - // fund the AHR's SA on BHR for paying bridge delivery fees + // fund the AHR's SA on BHR for paying bridge transport fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs index 70e7a7a3ddd3..1ae3a1b15805 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs @@ -58,7 +58,7 @@ fn register_rococo_asset_on_wah_from_rah() { let destination = asset_hub_westend_location(); - // fund the RAH's SA on RBH for paying bridge delivery fees + // fund the RAH's SA on RBH for paying bridge transport fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index cfcb581238e6..931a3128f826 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; - use crate::tests::*; #[test] @@ -40,8 +38,6 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - Dmp::make_parachain_reachable(BridgeHubRococo::para_id()); - assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -69,7 +65,7 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { let native_token = Location::parent(); let amount = ASSET_HUB_ROCOCO_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge delivery fees + // fund the AHR's SA on BHR for paying bridge transport fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // fund sender AssetHubRococo::fund_accounts(vec![(AssetHubRococoSender::get().into(), amount * 10)]); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index c72d5045ddc0..d59553574c26 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -84,11 +84,7 @@ fn create_agent() { let remote_xcm = VersionedXcm::from(Xcm(vec![ 
UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { - origin_kind: OriginKind::Xcm, - call: create_agent_call.encode().into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into() }, ])); // Rococo Global Consensus @@ -142,11 +138,7 @@ fn create_channel() { let create_agent_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { - origin_kind: OriginKind::Xcm, - call: create_agent_call.encode().into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into() }, ])); let create_channel_call = @@ -155,11 +147,7 @@ fn create_channel() { let create_channel_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), - Transact { - origin_kind: OriginKind::Xcm, - call: create_channel_call.encode().into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Xcm, call: create_channel_call.encode().into() }, ])); // Rococo Global Consensus diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index dc3bbb269d70..b87f25ac0f01 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,23 +11,23 @@ publish = false workspace = true [dependencies] -codec = { workspace = true } hex-literal = { workspace = true, default-features = true } +codec = { workspace = true } log = { workspace = true } scale-info = { workspace = true } # Substrate frame-support = { workspace = true } -pallet-asset-conversion = 
{ workspace = true } pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-balances = { workspace = true } pallet-message-queue = { workspace = true, default-features = true } sp-core = { workspace = true } sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } xcm = { workspace = true } +pallet-xcm = { workspace = true } xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } @@ -36,18 +36,18 @@ pallet-bridge-messages = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } # Cumulus -asset-hub-westend-runtime = { workspace = true } -bridge-hub-westend-runtime = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } rococo-westend-system-emulated-network = { workspace = true } testnet-parachains-constants = { features = ["rococo", "westend"], workspace = true, default-features = true } +asset-hub-westend-runtime = { workspace = true } +bridge-hub-westend-runtime = { workspace = true } # Snowbridge snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { workspace = true } -snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-pallet-system = { workspace = true } -snowbridge-router-primitives = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index cc90c10b54bc..ab09517339db 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -17,7 +17,7 @@ use crate::{create_pool_with_native_on, tests::*}; use xcm::latest::AssetTransferFilter; fn send_assets_over_bridge(send_fn: F) { - // fund the AHW's SA on BHW for paying bridge delivery fees + // fund the AHW's SA on BHW for paying bridge transport fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions @@ -592,7 +592,7 @@ fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo( // XCM to be executed at dest (Rococo Asset Hub) let xcm_on_dest = Xcm(vec![ // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover delivery fees for); we + // reserved for fees (there are no further hops to cover transport fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, // deposit everything to final beneficiary diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs index 952fc35e6703..424f1e55956b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs @@ -82,7 +82,7 @@ fn register_asset_on_rah_from_wah(bridged_asset_at_rah: Location) { let destination = asset_hub_rococo_location(); - // fund the WAH's SA on WBH for paying bridge delivery fees + // fund the WAH's SA on WBH for paying bridge transport fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 
10_000_000_000_000u128); // set XCM versions diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 60f8af2242f9..787d7dc842cb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use rococo_westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; - use crate::tests::*; #[test] @@ -40,8 +38,6 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - Dmp::make_parachain_reachable(BridgeHubWestend::para_id()); - assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -69,7 +65,7 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { let native_token = Location::parent(); let amount = ASSET_HUB_WESTEND_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge delivery fees + // fund the AHR's SA on BHR for paying bridge transport fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // fund sender AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get().into(), amount * 10)]); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs index f6a3c53c4bf5..db42704dae61 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs @@ -49,10 +49,10 @@ fn transfer_and_transact_in_same_xcm( // xcm to be executed at dest let xcm_on_dest = Xcm(vec![ - Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, + Transact { origin_kind: OriginKind::Xcm, call }, ExpectTransactStatus(MaybeErrorCode::Success), // since this is the last hop, we don't need to further use any assets previously - // reserved for fees (there are no further hops to cover delivery fees for); we + // reserved for fees (there are no further hops to cover transport fees for); we // RefundSurplus to get back any unspent fees RefundSurplus, DepositAsset { assets: Wild(All), beneficiary }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index 1d4e93d40da4..c4d281b75a77 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,31 +11,31 @@ publish = false workspace = true [dependencies] -assert_matches = { workspace = true } codec = { workspace = true } +assert_matches = { workspace = true } # Substrate +sp-runtime = { workspace = true } frame-support = { workspace = true } +pallet-balances = { workspace = true } pallet-asset-rate = { workspace = true } pallet-assets = { workspace = true } -pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-treasury = { workspace = true } +pallet-message-queue = { workspace = true } pallet-utility = { workspace = true } pallet-whitelist = { workspace = true } -sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } polkadot-runtime-common = { workspace = true, default-features = true } 
-westend-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +westend-runtime-constants = { workspace = true, default-features = true } # Cumulus -cumulus-pallet-parachain-system = { workspace = true } -cumulus-pallet-xcmp-queue = { workspace = true } -emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +emulated-integration-tests-common = { workspace = true } westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs index 802fed1e681d..80b82e0c446f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs @@ -41,7 +41,6 @@ fn fellows_whitelist_call() { ) .encode() .into(), - fallback_max_weight: None } ]))), }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index ed7c9bafc607..8418e3da3bba 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -20,7 +20,6 @@ use frame_support::{ }; use 
polkadot_runtime_common::impls::VersionedLocatableAsset; use westend_runtime_constants::currency::UNITS; -use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; // Fund Fellowship Treasury from Westend Treasury and spend from Fellowship Treasury. @@ -58,8 +57,6 @@ fn fellowship_treasury_spend() { treasury_balance * 2, )); - Dmp::make_parachain_reachable(1000); - let native_asset = Location::here(); let asset_hub_location: Location = [Parachain(1000)].into(); let treasury_location: Location = (Parent, PalletInstance(37)).into(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml index 61397b1b8d40..28d9da0993ff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml @@ -13,8 +13,8 @@ publish = false frame-support = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true, default-features = true } -pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs index 554025e1ecfe..9915b1753ef6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs @@ -17,7 +17,6 @@ use crate::imports::*; use frame_support::traits::OnInitialize; use pallet_broker::{ConfigRecord, Configuration, 
CoreAssignment, CoreMask, ScheduleItem}; use rococo_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; -use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use sp_runtime::Perbill; #[test] @@ -35,10 +34,6 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; - Rococo::execute_with(|| { - Dmp::make_parachain_reachable(CoretimeRococo::para_id()); - }); - // Reserve a workload, configure broker and start sales. CoretimeRococo::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml index 9f0eadf13650..d57e7926b0ec 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml @@ -13,8 +13,8 @@ publish = false frame-support = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true, default-features = true } -pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs index 900994b1afc1..00530f80b958 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs @@ -18,7 +18,6 @@ use frame_support::traits::OnInitialize; use 
pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; use sp_runtime::Perbill; use westend_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; -use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; #[test] fn transact_hardcoded_weights_are_sane() { @@ -35,10 +34,6 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; - Westend::execute_with(|| { - Dmp::make_parachain_reachable(CoretimeWestend::para_id()); - }); - // Reserve a workload, configure broker and start sales. CoretimeWestend::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 8b12897ef018..011be93ecac7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -13,8 +13,8 @@ codec = { workspace = true } # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } -pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index e069c1f61783..aa6eebc5458f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -13,14 +13,13 @@ codec = { workspace = true } # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } 
-pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } pallet-xcm = { workspace = true } sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } -westend-runtime = { workspace = true } westend-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs deleted file mode 100644 index ea438f80552e..000000000000 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::imports::*; -use frame_support::traits::ProcessMessageError; - -use codec::Encode; -use frame_support::sp_runtime::traits::Dispatchable; -use parachains_common::AccountId; -use people_westend_runtime::people::IdentityInfo; -use westend_runtime::{ - governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin, Dmp, -}; -use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime; - -use pallet_identity::Data; - -use emulated_integration_tests_common::accounts::{ALICE, BOB}; - -#[test] -fn relay_commands_add_registrar() { - let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); - - let registrar: AccountId = [1; 32].into(); - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let add_registrar_call = - PeopleCall::Identity(pallet_identity::Call::::add_registrar { - account: registrar.into(), - }); - - let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: add_registrar_call.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(xcm_message.dispatch(origin)); - - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - PeopleWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {}, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. 
}) => {}, - ] - ); - }); -} - -#[test] -fn relay_commands_add_registrar_wrong_origin() { - let people_westend_alice = PeopleWestend::account_id_of(ALICE); - - let origins = vec![ - ( - OriginKind::SovereignAccount, - ::RuntimeOrigin::signed(people_westend_alice), - ), - (OriginKind::Xcm, GeneralAdminOrigin.into()), - ]; - - let mut signed_origin = true; - - for (origin_kind, origin) in origins { - let registrar: AccountId = [1; 32].into(); - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let add_registrar_call = - PeopleCall::Identity(pallet_identity::Call::::add_registrar { - account: registrar.into(), - }); - - let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: add_registrar_call.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(xcm_message.dispatch(origin)); - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - PeopleWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - if signed_origin { - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {}, - ] - ); - } else { - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. 
}) => {}, - ] - ); - } - }); - - signed_origin = false; - } -} - -#[test] -fn relay_commands_kill_identity() { - // To kill an identity, first one must be set - PeopleWestend::execute_with(|| { - type PeopleRuntime = ::Runtime; - type PeopleRuntimeEvent = ::RuntimeEvent; - - let people_westend_alice = - ::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE)); - - let identity_info = IdentityInfo { - email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()), - ..Default::default() - }; - let identity: Box<::IdentityInformation> = - Box::new(identity_info); - - assert_ok!(::Identity::set_identity( - people_westend_alice, - identity - )); - - assert_expected_events!( - PeopleWestend, - vec![ - PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. }) => {}, - ] - ); - }); - - let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); - - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type PeopleCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let kill_identity_call = - PeopleCall::Identity(pallet_identity::Call::::kill_identity { - target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( - ALICE, - )), - }); - - let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: kill_identity_call.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(xcm_message.dispatch(origin)); - - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, - ] - ); - }); - - PeopleWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {}, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, - ] - ); - }); -} - -#[test] -fn relay_commands_kill_identity_wrong_origin() { - let people_westend_alice = PeopleWestend::account_id_of(BOB); - - let origins = vec![ - ( - OriginKind::SovereignAccount, - ::RuntimeOrigin::signed(people_westend_alice), - ), - (OriginKind::Xcm, GeneralAdminOrigin.into()), - ]; - - for (origin_kind, origin) in origins { - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type PeopleCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let kill_identity_call = - PeopleCall::Identity(pallet_identity::Call::::kill_identity { - target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( - ALICE, - )), - }); - - let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: kill_identity_call.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(xcm_message.dispatch(origin)); - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, - ] - ); - }); - - PeopleWestend::execute_with(|| { - assert_expected_events!(PeopleWestend, vec![]); - }); - } -} - -#[test] -fn relay_commands_add_remove_username_authority() { - let people_westend_alice = PeopleWestend::account_id_of(ALICE); - let people_westend_bob = PeopleWestend::account_id_of(BOB); - - let (origin_kind, origin, usr) = - (OriginKind::Superuser, ::RuntimeOrigin::root(), "rootusername"); - - // First, add a username authority. - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let add_username_authority = - PeopleCall::Identity(pallet_identity::Call::::add_username_authority { - authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), - suffix: b"suffix1".into(), - allocation: 10, - }); - - let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: add_username_authority.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); - - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - // Check events system-parachain-side - PeopleWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {}, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, - ] - ); - }); - - // Now, use the previously added username authority to concede a username to an account. 
- PeopleWestend::execute_with(|| { - type PeopleRuntimeEvent = ::RuntimeEvent; - let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); - - assert_ok!(::Identity::set_username_for( - ::RuntimeOrigin::signed(people_westend_alice.clone()), - people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()), - full_username, - None, - true - )); - - assert_expected_events!( - PeopleWestend, - vec![ - PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {}, - ] - ); - }); - - // Accept the given username - PeopleWestend::execute_with(|| { - type PeopleRuntimeEvent = ::RuntimeEvent; - let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); - - assert_ok!(::Identity::accept_username( - ::RuntimeOrigin::signed(people_westend_bob.clone()), - full_username.try_into().unwrap(), - )); - - assert_expected_events!( - PeopleWestend, - vec![ - PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {}, - ] - ); - }); - - // Now, remove the username authority with another priviledged XCM call. 
- Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< - PeopleRuntime, - >::remove_username_authority { - authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), - suffix: b"suffix1".into(), - }); - - let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: remove_username_authority.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(remove_authority_xcm_msg.dispatch(origin)); - - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - // Final event check. - PeopleWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_expected_events!( - PeopleWestend, - vec![ - RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {}, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. 
}) => {}, - ] - ); - }); -} - -#[test] -fn relay_commands_add_remove_username_authority_wrong_origin() { - let people_westend_alice = PeopleWestend::account_id_of(ALICE); - - let origins = vec![ - ( - OriginKind::SovereignAccount, - ::RuntimeOrigin::signed(people_westend_alice.clone()), - ), - (OriginKind::Xcm, GeneralAdminOrigin.into()), - ]; - - for (origin_kind, origin) in origins { - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - Dmp::make_parachain_reachable(1004); - - let add_username_authority = PeopleCall::Identity(pallet_identity::Call::< - PeopleRuntime, - >::add_username_authority { - authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), - suffix: b"suffix1".into(), - allocation: 10, - }); - - let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind, - call: add_username_authority.encode().into(), - fallback_max_weight: None - } - ]))), - }); - - assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, - ] - ); - }); - - // Check events system-parachain-side - PeopleWestend::execute_with(|| { - assert_expected_events!(PeopleWestend, vec![]); - }); - - Westend::execute_with(|| { - type Runtime = ::Runtime; - type RuntimeCall = ::RuntimeCall; - type RuntimeEvent = ::RuntimeEvent; - type PeopleCall = ::RuntimeCall; - type PeopleRuntime = ::Runtime; - - let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< - PeopleRuntime, - >::remove_username_authority { - authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), - suffix: b"suffix1".into(), - }); - - Dmp::make_parachain_reachable(1004); - - let remove_authority_xcm_msg = - RuntimeCall::XcmPallet(pallet_xcm::Call::::send { - dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), - message: bx!(VersionedXcm::from(Xcm(vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::SovereignAccount, - call: remove_username_authority.encode().into(), - fallback_max_weight: None, - } - ]))), - }); - - assert_ok!(remove_authority_xcm_msg.dispatch(origin)); - assert_expected_events!( - Westend, - vec![ - RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - ] - ); - }); - - PeopleWestend::execute_with(|| { - assert_expected_events!(PeopleWestend, vec![]); - }); - } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index b9ad9e3db467..08749b295dc2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,5 +14,4 @@ // limitations under the License. 
mod claim_assets; -mod governance; mod teleport; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 09301bd738f3..c52021f67e36 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -5,8 +5,6 @@ authors = ["Parity Technologies "] edition.workspace = true description = "Managed content" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 604441c65f29..e0bed23c4f8c 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -5,8 +5,6 @@ name = "staging-parachain-info" version = "0.7.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 248b5d7202fa..51fc384a4f14 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -5,8 +5,6 @@ name = "cumulus-ping" version = "0.7.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -15,14 +13,14 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } xcm = { workspace = true } -cumulus-pallet-xcm = { workspace = true } cumulus-primitives-core = { workspace = true } +cumulus-pallet-xcm = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/pallets/ping/src/lib.rs b/cumulus/parachains/pallets/ping/src/lib.rs index b6423a81db3c..2cf32c891fc0 100644 --- a/cumulus/parachains/pallets/ping/src/lib.rs +++ b/cumulus/parachains/pallets/ping/src/lib.rs @@ -114,7 +114,6 @@ pub mod pallet { }) .encode() .into(), - fallback_max_weight: None, }]), ) { Ok((hash, cost)) => { @@ -215,7 +214,6 @@ pub mod pallet { }) .encode() .into(), - fallback_max_weight: None, }]), ) { Ok((hash, cost)) => diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index c954ddb7b8c7..42adaba7a27c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -27,10 +25,10 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-asset-conversion-ops = { workspace = true } pallet-asset-conversion-tx-payment = { workspace = true } pallet-assets = { 
workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-assets-freezer = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } @@ -51,9 +49,9 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } sp-keyring = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -65,18 +63,17 @@ sp-weights = { workspace = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot +rococo-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -rococo-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } @@ -84,24 +81,24 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } 
parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +assets-common = { workspace = true } # Bridges +pallet-xcm-bridge-hub-router = { workspace = true } bp-asset-hub-rococo = { workspace = true } bp-asset-hub-westend = { workspace = true } bp-bridge-hub-rococo = { workspace = true } bp-bridge-hub-westend = { workspace = true } -pallet-xcm-bridge-hub-router = { workspace = true } snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } -parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -146,7 +143,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 8f4ae4670acd..2f9d83bd9d0b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -62,8 +62,7 @@ use frame_support::{ ord_parameter_types, parameter_types, traits::{ fungible, fungibles, tokens::imbalance::ResolveAssetTo, AsEnsureOriginWithArg, ConstBool, - ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Equals, InstanceFilter, - TransformOrigin, + ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -124,7 +123,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("statemine"), impl_name: alloc::borrow::Cow::Borrowed("statemine"), authoring_version: 1, - spec_version: 
1_017_001, + spec_version: 1_016_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -468,7 +467,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -654,7 +652,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -921,7 +918,6 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); - type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global @@ -937,10 +933,6 @@ impl pallet_xcm_bridge_hub_router::Config for Runtim type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; - type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -1420,31 +1412,37 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - acceptable_assets.extend( - assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) - .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? 
- ); + let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v5::Location + >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); + acceptable_assets.extend(assets_in_pool_with_native); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::TokenLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == native_asset => { // for native token Ok(fee_in_native) }, Ok(asset_id) => { - // Try to get current price of `asset_id` in `native_asset`. - if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( - asset_id.0.clone(), + let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v5::Location + >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; + if assets_in_pool_with_this_asset + .into_iter() + .map(|asset_id| asset_id.0) + .any(|location| location == native_asset) { + pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_id.clone().0, native_asset, fee_in_native, true, // We include the fee. - ) { - Ok(swapped_in_native) + ).ok_or(XcmPaymentApiError::AssetNotFound) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) @@ -1854,8 +1852,20 @@ impl_runtime_apis! 
{ type ToWestend = XcmBridgeHubRouterBench; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + //TODO: use from relay_well_known_keys::ACTIVE_CONFIG + hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 8506125d4133..51b6543bae82 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `55b2c3410882`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=asset-hub-rococo-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,16 +64,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_401_000 picoseconds. - Weight::from_parts(29_326_000, 0) + // Minimum execution time: 22_136_000 picoseconds. 
+ Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -94,20 +90,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 109_686_000 picoseconds. - Weight::from_parts(114_057_000, 0) + // Minimum execution time: 92_277_000 picoseconds. 
+ Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -117,29 +111,25 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, 
`max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 137_693_000 picoseconds. - Weight::from_parts(142_244_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) - /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -156,24 +146,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `537` + // Measured: `496` // Estimated: `6208` - // Minimum execution time: 178_291_000 picoseconds. - Weight::from_parts(185_648_000, 0) + // Minimum execution time: 143_116_000 picoseconds. 
+ Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 14_014_000 picoseconds. - Weight::from_parts(14_522_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -181,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_195_000 picoseconds. - Weight::from_parts(7_440_000, 0) + // Minimum execution time: 6_517_000 picoseconds. + Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_278_000 picoseconds. - Weight::from_parts(2_488_000, 0) + // Minimum execution time: 1_894_000 picoseconds. 
+ Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -219,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 35_095_000 picoseconds. - Weight::from_parts(36_347_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -245,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 38_106_000 picoseconds. - Weight::from_parts(38_959_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -257,45 +246,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_307_000 picoseconds. - Weight::from_parts(2_478_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `15999` - // Minimum execution time: 25_238_000 picoseconds. 
- Weight::from_parts(25_910_000, 0) - .saturating_add(Weight::from_parts(0, 15999)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13524` + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `16003` - // Minimum execution time: 25_626_000 picoseconds. - Weight::from_parts(26_147_000, 0) - .saturating_add(Weight::from_parts(0, 16003)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13528` + // Minimum execution time: 19_177_000 picoseconds. + Weight::from_parts(19_704_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `18488` - // Minimum execution time: 28_528_000 picoseconds. - Weight::from_parts(28_882_000, 0) - .saturating_add(Weight::from_parts(0, 18488)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `16013` + // Minimum execution time: 20_449_000 picoseconds. 
+ Weight::from_parts(21_075_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -315,36 +304,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 33_042_000 picoseconds. - Weight::from_parts(34_444_000, 0) + // Minimum execution time: 26_578_000 picoseconds. + Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `13541` - // Minimum execution time: 18_218_000 picoseconds. - Weight::from_parts(18_622_000, 0) - .saturating_add(Weight::from_parts(0, 13541)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `206` + // Estimated: `11096` + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `16010` - // Minimum execution time: 25_838_000 picoseconds. 
- Weight::from_parts(26_276_000, 0) - .saturating_add(Weight::from_parts(0, 16010)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13535` + // Minimum execution time: 19_301_000 picoseconds. + Weight::from_parts(19_664_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -361,11 +350,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `16052` - // Minimum execution time: 46_196_000 picoseconds. - Weight::from_parts(47_859_000, 0) - .saturating_add(Weight::from_parts(0, 16052)) - .saturating_add(T::DbWeight::get().reads(12)) + // Estimated: `13577` + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -376,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 7_068_000 picoseconds. - Weight::from_parts(7_442_000, 0) + // Minimum execution time: 4_871_000 picoseconds. 
+ Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -388,24 +377,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 31_497_000 picoseconds. - Weight::from_parts(31_975_000, 0) + // Minimum execution time: 25_150_000 picoseconds. + Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 44_534_000 picoseconds. - Weight::from_parts(46_175_000, 0) + // Minimum execution time: 38_248_000 picoseconds. + Weight::from_parts(39_122_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 9a75428ada8b..00ecf239428f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `ToWestendXcmRouter::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `154` + // Measured: `153` // Estimated: `5487` - // Minimum execution time: 13_884_000 picoseconds. - Weight::from_parts(14_312_000, 0) + // Minimum execution time: 12_993_000 picoseconds. 
+ Weight::from_parts(13_428_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -72,21 +72,9 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `144` // Estimated: `5487` - // Minimum execution time: 6_909_000 picoseconds. - Weight::from_parts(7_115_000, 0) + // Minimum execution time: 6_305_000 picoseconds. + Weight::from_parts(6_536_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } - /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `150` - // Estimated: `1502` - // Minimum execution time: 12_394_000 picoseconds. - Weight::from_parts(12_883_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index ccf473484cad..bf374fc415ce 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -85,11 +84,7 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { 
assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -177,16 +172,8 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. } => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() @@ -266,7 +253,4 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index d48debef94c8..ef08b432e5c7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -87,7 +87,7 @@ impl WeightInfo { // Minimum execution time: 5_803_000 picoseconds. Weight::from_parts(5_983_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -373,11 +373,4 @@ impl WeightInfo { // Minimum execution time: 668_000 picoseconds. 
Weight::from_parts(726_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 08b2f520c4b9..66743fa3a07e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -66,7 +66,6 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const TokenLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -316,7 +315,6 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. 
pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 144934ecd4ab..5da8b45417a3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -24,11 +24,10 @@ use asset_hub_rococo_runtime::{ ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, - AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, Block, - CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, - MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, - RuntimeEvent, RuntimeOrigin, SessionKeys, ToWestendXcmRouterInstance, - TrustBackedAssetsInstance, XcmpQueue, + AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, + ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, + MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + SessionKeys, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -1243,58 +1242,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToWestendXcmRouterInstance, - >( - collator_session_keys(), - bridging_to_asset_hub_westend, - || bp_asset_hub_rococo::build_congestion_message(Default::default(), true).into(), - || bp_asset_hub_rococo::build_congestion_message(Default::default(), false).into(), - ) - } - - 
#[test] - fn test_report_bridge_status_call_compatibility() { - // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToWestendXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_rococo::Call::ToWestendXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ); - } - - #[test] - fn check_sane_weight_report_bridge_status_for_westend() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } - #[test] fn reserve_transfer_native_asset_to_non_teleport_para_works() { asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< @@ -1524,19 +1471,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); - asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 7c31745d8f6e..d5eaa43ab834 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -27,10 +25,10 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-asset-conversion = { workspace = true } pallet-asset-conversion-ops = { workspace = true } pallet-asset-conversion-tx-payment = { workspace = true } pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-assets-freezer = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } @@ -40,21 +38,21 @@ pallet-nft-fractionalization = { workspace = true } pallet-nfts = { workspace = true } pallet-nfts-runtime-api = { workspace = true } pallet-proxy = { workspace = true } -pallet-revive = { workspace = true } pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-uniques = { workspace = true } +pallet-revive = { workspace = true } pallet-utility = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } +sp-keyring = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -77,33 +75,32 @@ xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = 
true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } -pallet-message-queue = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +assets-common = { workspace = true } # Bridges +pallet-xcm-bridge-hub-router = { workspace = true } bp-asset-hub-rococo = { workspace = true } bp-asset-hub-westend = { workspace = true } bp-bridge-hub-rococo = { workspace = true } bp-bridge-hub-westend = { workspace = true } -pallet-xcm-bridge-hub-router = { workspace = true } snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } -parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -150,7 +147,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs index 824544e3b687..f440b5a2f421 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs @@ -18,7 +18,6 @@ use crate::*; use alloc::{vec, vec::Vec}; 
use cumulus_primitives_core::ParaId; -use frame_support::build_struct_json_patch; use hex_literal::hex; use parachains_common::{AccountId, AuraId}; use sp_core::crypto::UncheckedInto; @@ -36,14 +35,15 @@ fn asset_hub_westend_genesis( endowment: Balance, id: ParaId, ) -> serde_json::Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(), }, - parachain_info: ParachainInfoConfig { parachain_id: id }, + parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: ASSET_HUB_WESTEND_ED * 16, + ..Default::default() }, session: SessionConfig { keys: invulnerables @@ -56,9 +56,16 @@ fn asset_hub_westend_genesis( ) }) .collect(), + ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, - }) + polkadot_xcm: PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Encapsulates names of predefined presets. 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 26ef3219a1e9..63175222cc26 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -45,9 +45,12 @@ use frame_support::{ ord_parameter_types, parameter_types, traits::{ fungible, fungibles, - tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Equals, - InstanceFilter, Nothing, TransformOrigin, + tokens::{ + imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, Fortitude::Polite, + Preservation::Expendable, + }, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, InstanceFilter, + Nothing, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -59,14 +62,13 @@ use frame_system::{ use pallet_asset_conversion_tx_payment::SwapAssetAdapter; use pallet_nfts::{DestroyWitness, PalletFeatures}; use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; -use pallet_xcm::EnsureXcm; use parachains_common::{ impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160, U256}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160}; use sp_runtime::{ generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, @@ -125,7 +127,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westmint"), impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_017_003, + spec_version: 1_016_004, impl_version: 0, apis: 
RUNTIME_API_VERSIONS, transaction_version: 16, @@ -467,7 +469,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -653,7 +654,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -915,7 +915,6 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); - type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global @@ -931,10 +930,6 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; - type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -1533,31 +1528,38 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - acceptable_assets.extend( - assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) - .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? 
- ); + let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v5::Location + >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); + acceptable_assets.extend(assets_in_pool_with_native); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::WestendLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == native_asset => { // for native asset Ok(fee_in_native) }, Ok(asset_id) => { - // Try to get current price of `asset_id` in `native_asset`. - if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( - asset_id.0.clone(), + // We recognize assets in a pool with the native one. + let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v5::Location + >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; + if assets_in_pool_with_this_asset + .into_iter() + .map(|asset_id| asset_id.0) + .any(|location| location == native_asset) { + pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_id.clone().0, native_asset, fee_in_native, true, // We include the fee. - ) { - Ok(swapped_in_native) + ).ok_or(XcmPaymentApiError::AssetNotFound) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) @@ -2030,8 +2032,20 @@ impl_runtime_apis! 
{ type ToRococo = XcmBridgeHubRouterBench; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + //TODO: use from relay_well_known_keys::ACTIVE_CONFIG + hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -2066,18 +2080,28 @@ impl_runtime_apis! 
{ impl pallet_revive::ReviveApi for Runtime { - fn balance(address: H160) -> U256 { - Revive::evm_balance(&address) + fn balance(address: H160) -> Balance { + use frame_support::traits::fungible::Inspect; + let account = ::AddressMapper::to_account_id(&address); + Balances::reducible_balance(&account, Expendable, Polite) } fn nonce(address: H160) -> Nonce { let account = ::AddressMapper::to_account_id(&address); System::account_nonce(account) } - - fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> + fn eth_transact( + from: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> pallet_revive::EthContractResult { - let blockweights: BlockWeights = ::BlockWeights::get(); + use pallet_revive::AddressMapper; + let blockweights = ::BlockWeights::get(); + let origin = ::AddressMapper::to_account_id(&from); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -2086,9 +2110,15 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - tx, - blockweights.max_block, + origin, + dest, + value, + input, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), encoded_size, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -2106,7 +2136,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(blockweights.max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + storage_deposit_limit.unwrap_or(u128::MAX), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -2128,7 +2158,7 @@ impl_runtime_apis! 
{ RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(blockweights.max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + storage_deposit_limit.unwrap_or(u128::MAX), code, data, salt, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 93409463d4e5..be3d7661ab3c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `c0a5c14955e4`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-f3xfxtob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=asset-hub-westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,16 +64,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_333_000 picoseconds. - Weight::from_parts(29_115_000, 0) + // Minimum execution time: 21_050_000 picoseconds. 
+ Weight::from_parts(21_834_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -94,20 +90,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 111_150_000 picoseconds. - Weight::from_parts(113_250_000, 0) + // Minimum execution time: 92_497_000 picoseconds. 
+ Weight::from_parts(95_473_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -117,29 +111,25 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, 
`max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `400` + // Measured: `367` // Estimated: `6196` - // Minimum execution time: 135_730_000 picoseconds. - Weight::from_parts(140_479_000, 0) + // Minimum execution time: 120_059_000 picoseconds. + Weight::from_parts(122_894_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) - /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -156,24 +146,21 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `571` + // Measured: `496` // Estimated: `6208` - // Minimum execution time: 174_654_000 picoseconds. 
- Weight::from_parts(182_260_000, 0) + // Minimum execution time: 141_977_000 picoseconds. + Weight::from_parts(145_981_000, 0) .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 12_750_000 picoseconds. - Weight::from_parts(13_124_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_426_000 picoseconds. + Weight::from_parts(7_791_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -181,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_083_000 picoseconds. - Weight::from_parts(7_353_000, 0) + // Minimum execution time: 6_224_000 picoseconds. + Weight::from_parts(6_793_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_408_000, 0) + // Minimum execution time: 1_812_000 picoseconds. 
+ Weight::from_parts(2_008_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -219,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 34_983_000 picoseconds. - Weight::from_parts(35_949_000, 0) + // Minimum execution time: 26_586_000 picoseconds. + Weight::from_parts(27_181_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -245,8 +232,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 38_226_000 picoseconds. - Weight::from_parts(39_353_000, 0) + // Minimum execution time: 28_295_000 picoseconds. + Weight::from_parts(29_280_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -257,45 +244,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_254_000 picoseconds. - Weight::from_parts(2_432_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_876_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `15999` - // Minimum execution time: 25_561_000 picoseconds. 
- Weight::from_parts(26_274_000, 0) - .saturating_add(Weight::from_parts(0, 15999)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13524` + // Minimum execution time: 18_946_000 picoseconds. + Weight::from_parts(19_456_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `16003` - // Minimum execution time: 25_950_000 picoseconds. - Weight::from_parts(26_532_000, 0) - .saturating_add(Weight::from_parts(0, 16003)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13528` + // Minimum execution time: 19_080_000 picoseconds. + Weight::from_parts(19_498_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `18488` - // Minimum execution time: 28_508_000 picoseconds. - Weight::from_parts(29_178_000, 0) - .saturating_add(Weight::from_parts(0, 18488)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `16013` + // Minimum execution time: 20_637_000 picoseconds. 
+ Weight::from_parts(21_388_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -315,36 +302,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 33_244_000 picoseconds. - Weight::from_parts(33_946_000, 0) + // Minimum execution time: 25_701_000 picoseconds. + Weight::from_parts(26_269_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `13541` - // Minimum execution time: 18_071_000 picoseconds. - Weight::from_parts(18_677_000, 0) - .saturating_add(Weight::from_parts(0, 13541)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `206` + // Estimated: `11096` + // Minimum execution time: 11_949_000 picoseconds. + Weight::from_parts(12_249_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `16010` - // Minimum execution time: 25_605_000 picoseconds. 
- Weight::from_parts(26_284_000, 0) - .saturating_add(Weight::from_parts(0, 16010)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13535` + // Minimum execution time: 19_278_000 picoseconds. + Weight::from_parts(19_538_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -361,11 +348,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `16052` - // Minimum execution time: 46_991_000 picoseconds. - Weight::from_parts(47_866_000, 0) - .saturating_add(Weight::from_parts(0, 16052)) - .saturating_add(T::DbWeight::get().reads(12)) + // Estimated: `13577` + // Minimum execution time: 35_098_000 picoseconds. + Weight::from_parts(35_871_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -376,8 +363,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_685_000 picoseconds. - Weight::from_parts(5_816_000, 0) + // Minimum execution time: 3_862_000 picoseconds. 
+ Weight::from_parts(4_082_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -388,24 +375,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 31_271_000 picoseconds. - Weight::from_parts(32_195_000, 0) + // Minimum execution time: 25_423_000 picoseconds. + Weight::from_parts(25_872_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 43_530_000 picoseconds. - Weight::from_parts(44_942_000, 0) + // Minimum execution time: 37_148_000 picoseconds. + Weight::from_parts(37_709_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index 78aa839deacd..c0898012e9f3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + /// Storage: `ToRococoXcmRouter::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `259` + // Measured: `225` // Estimated: `5487` - // Minimum execution time: 14_643_000 picoseconds. - Weight::from_parts(14_992_000, 0) + // Minimum execution time: 13_483_000 picoseconds. 
+ Weight::from_parts(13_862_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -70,23 +70,11 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `111` // Estimated: `5487` - // Minimum execution time: 5_367_000 picoseconds. - Weight::from_parts(5_604_000, 0) + // Minimum execution time: 5_078_000 picoseconds. + Weight::from_parts(5_233_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `150` - // Estimated: `1502` - // Minimum execution time: 12_562_000 picoseconds. 
- Weight::from_parts(12_991_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index a0e9705ff01d..928f1910cbd2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -21,7 +21,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -84,11 +83,7 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -177,16 +172,8 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. 
} => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() @@ -266,7 +253,4 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 0ec2741c0490..7098f175d421 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -87,7 +87,7 @@ impl WeightInfo { // Minimum execution time: 5_580_000 picoseconds. Weight::from_parts(5_950_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -373,11 +373,4 @@ impl WeightInfo { // Minimum execution time: 638_000 picoseconds. Weight::from_parts(708_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. 
- Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index b4e938f1f8b5..88ccd42dff7f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -63,7 +63,6 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const WestendLocation: Location = Location::parent(); pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -337,7 +336,6 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, FellowshipEntities, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 24b6d83ffae4..5d0f843554a1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -24,10 +24,10 @@ use asset_hub_westend_runtime::{ ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig, }, - AllPalletsWithoutSystem, Assets, Balances, Block, ExistentialDeposit, ForeignAssets, + AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - ToRococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + 
TrustBackedAssetsInstance, XcmpQueue, }; pub use asset_hub_westend_runtime::{AssetConversion, AssetDeposit, CollatorSelection, System}; use asset_test_utils::{ @@ -1250,56 +1250,6 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic ) } -#[test] -fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToRococoXcmRouterInstance, - >( - collator_session_keys(), - bridging_to_asset_hub_rococo, - || bp_asset_hub_westend::build_congestion_message(Default::default(), true).into(), - || bp_asset_hub_westend::build_congestion_message(Default::default(), false).into(), - ) -} - -#[test] -fn test_report_bridge_status_call_compatibility() { - // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToRococoXcmRouter(pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - }) - .encode(), - bp_asset_hub_westend::Call::ToRococoXcmRouter( - bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ) -} - -#[test] -fn check_sane_weight_report_bridge_status() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); -} - #[test] fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { asset_test_utils::test_cases::change_storage_constant_by_governance_works::< @@ -1496,19 +1446,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); - asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index de74f59f43c0..fb66f0de2322 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -5,24 +5,22 @@ authors.workspace = true edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Substrate frame-support = { workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-assets = { workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot pallet-xcm = { workspace = true } @@ -31,8 +29,8 @@ xcm-builder = { workspace = true } xcm-executor = { workspace = true } # Cumulus -cumulus-primitives-core = { workspace = true } parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } [build-dependencies] substrate-wasm-builder = { workspace = true, default-features = true } @@ -66,5 +64,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 
25c2df6b68d1..1d2d45b42c5d 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -28,7 +28,7 @@ extern crate alloc; use crate::matching::{LocalLocationPattern, ParentLocation}; use alloc::vec::Vec; use codec::{Decode, EncodeLike}; -use core::{cmp::PartialEq, marker::PhantomData}; +use core::cmp::PartialEq; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; use sp_runtime::traits::TryConvertInto; @@ -137,62 +137,24 @@ pub type PoolAssetsConvertedConcreteId = TryConvertInto, >; -/// Adapter implementation for accessing pools (`pallet_asset_conversion`) that uses `AssetKind` as -/// a `xcm::v*` which could be different from the `xcm::latest`. -pub struct PoolAdapter(PhantomData); -impl< - Runtime: pallet_asset_conversion::Config, - L: TryFrom + TryInto + Clone + Decode + EncodeLike + PartialEq, - > PoolAdapter -{ - /// Returns a vector of all assets in a pool with `asset`. - /// - /// Should only be used in runtime APIs since it iterates over the whole - /// `pallet_asset_conversion::Pools` map. - /// - /// It takes in any version of an XCM Location but always returns the latest one. - /// This is to allow some margin of migrating the pools when updating the XCM version. - /// - /// An error of type `()` is returned if the version conversion fails for XCM locations. - /// This error should be mapped by the caller to a more descriptive one. - pub fn get_assets_in_pool_with(asset: Location) -> Result, ()> { - // convert latest to the `L` version. - let asset: L = asset.try_into().map_err(|_| ())?; - Self::iter_assets_in_pool_with(&asset) - .map(|location| { - // convert `L` to the latest `AssetId` - location.try_into().map_err(|_| ()).map(AssetId) - }) - .collect::, _>>() - } - - /// Provides a current prices. Wrapper over - /// `pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens`. 
- /// - /// An error of type `()` is returned if the version conversion fails for XCM locations. - /// This error should be mapped by the caller to a more descriptive one. - pub fn quote_price_tokens_for_exact_tokens( - asset_1: Location, - asset_2: Location, - amount: Runtime::Balance, - include_fees: bool, - ) -> Result, ()> { - // Convert latest to the `L` version. - let asset_1: L = asset_1.try_into().map_err(|_| ())?; - let asset_2: L = asset_2.try_into().map_err(|_| ())?; - - // Quote swap price. - Ok(pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( - asset_1, - asset_2, - amount, - include_fees, - )) - } - - /// Helper function for filtering pool. - pub fn iter_assets_in_pool_with(asset: &L) -> impl Iterator + '_ { - pallet_asset_conversion::Pools::::iter_keys().filter_map(|(asset_1, asset_2)| { +/// Returns an iterator of all assets in a pool with `asset`. +/// +/// Should only be used in runtime APIs since it iterates over the whole +/// `pallet_asset_conversion::Pools` map. +/// +/// It takes in any version of an XCM Location but always returns the latest one. +/// This is to allow some margin of migrating the pools when updating the XCM version. +/// +/// An error of type `()` is returned if the version conversion fails for XCM locations. +/// This error should be mapped by the caller to a more descriptive one. 
+pub fn get_assets_in_pool_with< + Runtime: pallet_asset_conversion::Config, + L: TryInto + Clone + Decode + EncodeLike + PartialEq, +>( + asset: &L, +) -> Result, ()> { + pallet_asset_conversion::Pools::::iter_keys() + .filter_map(|(asset_1, asset_2)| { if asset_1 == *asset { Some(asset_2) } else if asset_2 == *asset { @@ -201,7 +163,8 @@ impl< None } }) - } + .map(|location| location.try_into().map_err(|_| ()).map(AssetId)) + .collect::, _>>() } #[cfg(test)] diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index cad8d10a7da3..529d6460fc4e 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -17,29 +15,27 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } -pallet-asset-conversion = { workspace = true } pallet-assets = { workspace = true } pallet-balances = { workspace = true } -pallet-session = { workspace = true } pallet-timestamp = { workspace = true } +pallet-session = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } -cumulus-primitives-core = { workspace = true } pallet-collator-selection = { workspace = true } -parachain-info = { workspace = true } parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } +parachain-info = { workspace = true } parachains-runtimes-test-utils = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true 
} xcm-executor = { workspace = true } -xcm-runtime-apis = { workspace = true } +pallet-xcm = { workspace = true } # Bridges pallet-xcm-bridge-hub-router = { workspace = true } @@ -59,7 +55,6 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", - "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", @@ -74,6 +69,5 @@ std = [ "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", - "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index b1577e0ca7f6..8dc720e27753 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -34,14 +34,11 @@ use parachains_runtimes_test_utils::{ CollatorSessionKeys, ExtBuilder, SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ - traits::{Block as BlockT, MaybeEquivalence, StaticLookup, Zero}, + traits::{MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; -use xcm_runtime_apis::fees::{ - runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, -}; type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; @@ -1205,20 +1202,14 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor let xcm = Xcm(vec![ WithdrawAsset(buy_execution_fee.clone().into()), BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { - origin_kind: OriginKind::Xcm, - call: foreign_asset_create.into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Xcm, call: foreign_asset_create.into() }, Transact { origin_kind: OriginKind::SovereignAccount, call: foreign_asset_set_metadata.into(), - fallback_max_weight: None, }, 
Transact { origin_kind: OriginKind::SovereignAccount, call: foreign_asset_set_team.into(), - fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -1324,11 +1315,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor let xcm = Xcm(vec![ WithdrawAsset(buy_execution_fee.clone().into()), BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { - origin_kind: OriginKind::Xcm, - call: foreign_asset_create.into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Xcm, call: foreign_asset_create.into() }, ExpectTransactStatus(MaybeErrorCode::from(DispatchError::BadOrigin.encode())), ]); @@ -1597,108 +1584,3 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< ); }) } - -pub fn xcm_payment_api_with_pools_works() -where - Runtime: XcmPaymentApiV1 - + frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config - + pallet_timestamp::Config - + pallet_assets::Config< - pallet_assets::Instance1, - AssetId = u32, - Balance = ::Balance, - > + pallet_asset_conversion::Config< - AssetKind = xcm::v5::Location, - Balance = ::Balance, - >, - ValidatorIdOf: From>, - RuntimeOrigin: OriginTrait::AccountId>, - <::Lookup as StaticLookup>::Source: - From<::AccountId>, - Block: BlockT, -{ - use xcm::prelude::*; - - ExtBuilder::::default().build().execute_with(|| { - let test_account = AccountId::from([0u8; 32]); - let transfer_amount = 100u128; - let xcm_to_weigh = Xcm::::builder_unsafe() - .withdraw_asset((Here, transfer_amount)) - .buy_execution((Here, transfer_amount), Unlimited) - .deposit_asset(AllCounted(1), [1u8; 32]) - .build(); - let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); - - let xcm_weight = 
Runtime::query_xcm_weight(versioned_xcm_to_weigh); - assert!(xcm_weight.is_ok()); - let native_token: Location = Parent.into(); - let native_token_versioned = VersionedAssetId::from(AssetId(native_token.clone())); - let execution_fees = - Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); - assert!(execution_fees.is_ok()); - - // We need some balance to create an asset. - assert_ok!( - pallet_balances::Pallet::::mint_into(&test_account, 3_000_000_000_000,) - ); - - // Now we try to use an asset that's not in a pool. - let asset_id = 1984u32; // USDT. - let asset_not_in_pool: Location = - (PalletInstance(50), GeneralIndex(asset_id.into())).into(); - assert_ok!(pallet_assets::Pallet::::create( - RuntimeOrigin::signed(test_account.clone()), - asset_id.into(), - test_account.clone().into(), - 1000 - )); - let execution_fees = Runtime::query_weight_to_asset_fee( - xcm_weight.unwrap(), - asset_not_in_pool.clone().into(), - ); - assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); - - // We add it to a pool with native. - assert_ok!(pallet_asset_conversion::Pallet::::create_pool( - RuntimeOrigin::signed(test_account.clone()), - native_token.clone().try_into().unwrap(), - asset_not_in_pool.clone().try_into().unwrap() - )); - let execution_fees = Runtime::query_weight_to_asset_fee( - xcm_weight.unwrap(), - asset_not_in_pool.clone().into(), - ); - // Still not enough because it doesn't have any liquidity. - assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); - - // We mint some of the asset... - assert_ok!(pallet_assets::Pallet::::mint( - RuntimeOrigin::signed(test_account.clone()), - asset_id.into(), - test_account.clone().into(), - 3_000_000_000_000, - )); - // ...so we can add liquidity to the pool. 
- assert_ok!(pallet_asset_conversion::Pallet::::add_liquidity( - RuntimeOrigin::signed(test_account.clone()), - native_token.try_into().unwrap(), - asset_not_in_pool.clone().try_into().unwrap(), - 1_000_000_000_000, - 2_000_000_000_000, - 0, - 0, - test_account - )); - let execution_fees = - Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), asset_not_in_pool.into()); - // Now it works! - assert_ok!(execution_fees); - }); -} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 9b05f2d46dfb..4f144e24aa30 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -551,7 +551,10 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< Weight::zero(), ); assert_ok!(outcome.ensure_complete()); - assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); + assert_eq!( + is_congested, + <>::LocalXcmChannelManager as pallet_xcm_bridge_hub_router::XcmChannelStatusProvider>::is_congested(&local_bridge_hub_location) + ); }; report_bridge_status(true); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 3fabea3b02f4..4af8a9f43850 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -34,9 +32,9 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } 
+pallet-session = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } -pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } @@ -45,10 +43,10 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } +sp-keyring = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -58,11 +56,11 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot +rococo-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -rococo-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -95,28 +93,27 @@ bp-parachains = { workspace = true } bp-polkadot-bulletin = { workspace = true } bp-polkadot-core = { workspace = true } bp-relayers = { workspace = true } -bp-rococo = { workspace = true } bp-runtime = { workspace = true } +bp-rococo = { workspace = true } bp-westend = { workspace = true } -bp-xcm-bridge-hub-router = { workspace = true } -bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } # Ethereum Bridge (Snowbridge) 
snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } snowbridge-core = { workspace = true } -snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-pallet-ethereum-client = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-pallet-system = { workspace = true } +snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } -snowbridge-system-runtime-api = { workspace = true } bridge-hub-common = { workspace = true } @@ -124,7 +121,6 @@ bridge-hub-common = { workspace = true } bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } -parachains-runtimes-test-utils = { workspace = true, default-features = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] @@ -144,7 +140,6 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", - "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -266,7 +261,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 1e733503f43b..7e0385692375 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -22,13 +22,14 @@ use crate::{ bridge_common_config::RelayersForPermissionlessLanesInstance, weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoBulletinGrandpa, - BridgeRococoBulletinMessages, Runtime, RuntimeEvent, RuntimeHoldReason, XcmOverRococoBulletin, - XcmRouter, + BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeHoldReason, + XcmOverRococoBulletin, XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, LegacyLaneId, + target_chain::FromBridgedChainMessagesProof, HashedLaneId, }; +use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; use frame_support::{ parameter_types, @@ -45,7 +46,6 @@ use testnet_parachains_constants::rococo::currency::UNITS as ROC; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, - AlwaysV5, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; @@ -120,7 +120,7 @@ impl pallet_bridge_messages::Config for Runt type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type LaneId = LegacyLaneId; + type LaneId = HashedLaneId; type DeliveryPayments = (); type DeliveryConfirmationPayments = (); @@ -139,7 +139,8 @@ impl pallet_xcm_bridge_hub::Config for Runtime type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance; type MessageExportPrice = (); - type DestinationVersion = AlwaysV5; + type DestinationVersion = + XcmVersionOfDestAndRemoteBridge; type ForceOrigin = EnsureRoot; // We don't want to allow creating bridges for this instance. 
@@ -200,6 +201,7 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, @@ -252,7 +254,7 @@ where let universal_source = [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into(); let universal_destination = - [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); + [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get()), Parachain(2075)].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes @@ -278,3 +280,29 @@ where universal_source } + +/// Contains the migration for the PeopleRococo<>RococoBulletin bridge. +pub mod migration { + use super::*; + use frame_support::traits::ConstBool; + + parameter_types! { + pub BulletinRococoLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); + pub RococoPeopleToRococoBulletinMessagesLane: HashedLaneId = pallet_xcm_bridge_hub::Pallet::< Runtime, XcmOverPolkadotBulletinInstance >::bridge_locations( + PeopleRococoLocation::get(), + BulletinRococoLocation::get() + ) + .unwrap() + .calculate_lane_id(xcm::latest::VERSION).expect("Valid locations"); + } + + /// Ensure that the existing lanes for the People<>Bulletin bridge are correctly configured. 
+ pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane< + Runtime, + XcmOverPolkadotBulletinInstance, + RococoPeopleToRococoBulletinMessagesLane, + ConstBool, + PeopleRococoLocation, + BulletinRococoLocation, + >; +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index a14101eb454b..0eab3c74a7e2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -24,14 +24,14 @@ use crate::{ weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, XcmpQueue, + RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; +use pallet_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::{parameter_types, traits::PalletInfoAccess}; use frame_system::{EnsureNever, EnsureRoot}; @@ -121,7 +121,6 @@ impl pallet_bridge_messages::Config for Ru type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, - RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -157,46 +156,11 @@ impl pallet_xcm_bridge_hub::Config for Runtime type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - type LocalXcmChannelManager = CongestionManager; + // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - https://github.com/paritytech/parity-bridges-common/issues/3047 + type LocalXcmChannelManager 
= (); type BlobDispatcher = FromWestendMessageBlobDispatcher; } -/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. -pub struct CongestionManager; -impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { - type Error = SendError; - - fn is_congested(with: &Location) -> bool { - // This is used to check the inbound bridge queue/messages to determine if they can be - // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` - // is sufficient here. - use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; - cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( - with, - ) - } - - fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - // This bridge is intended for AH<>AH communication with a hard-coded/static lane, - // so `local_origin` is expected to represent only the local AH. - send_xcm::( - local_origin.clone(), - bp_asset_hub_rococo::build_congestion_message(bridge.inner(), true).into(), - ) - .map(|_| ()) - } - - fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - // This bridge is intended for AH<>AH communication with a hard-coded/static lane, - // so `local_origin` is expected to represent only the local AH. 
- send_xcm::( - local_origin.clone(), - bp_asset_hub_rococo::build_congestion_message(bridge.inner(), false).into(), - ) - .map(|_| ()) - } -} - #[cfg(feature = "runtime-benchmarks")] pub(crate) fn open_bridge_for_benchmarks( with: pallet_xcm_bridge_hub::LaneIdOf, @@ -292,6 +256,7 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, @@ -301,6 +266,7 @@ mod tests { Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, + bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs index 55fd499c2f54..20ca88bbc542 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -18,7 +18,6 @@ use crate::*; use alloc::{vec, vec::Vec}; use cumulus_primitives_core::ParaId; -use frame_support::build_struct_json_patch; use parachains_common::{AccountId, AuraId}; use sp_genesis_builder::PresetId; use sp_keyring::Sr25519Keyring; @@ -35,7 +34,7 @@ fn bridge_hub_rococo_genesis( asset_hub_para_id: ParaId, opened_bridges: Vec<(Location, InteriorLocation, Option)>, ) -> serde_json::Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -43,10 +42,11 @@ fn bridge_hub_rococo_genesis( .map(|k| (k, 1u128 << 60)) .collect::>(), }, - parachain_info: 
ParachainInfoConfig { parachain_id: id }, + parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: BRIDGE_HUB_ROCOCO_ED * 16, + ..Default::default() }, session: SessionConfig { keys: invulnerables @@ -59,25 +59,33 @@ fn bridge_hub_rococo_genesis( ) }) .collect(), + ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, - bridge_polkadot_bulletin_grandpa: BridgePolkadotBulletinGrandpaConfig { + polkadot_xcm: PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone(), + ..Default::default() }, - bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone() }, bridge_westend_messages: BridgeWestendMessagesConfig { owner: bridges_pallet_owner.clone(), + ..Default::default() }, - xcm_over_polkadot_bulletin: XcmOverPolkadotBulletinConfig { - opened_bridges: vec![( - Location::new(1, [Parachain(1004)]), - Junctions::from([GlobalConsensus(NetworkId::PolkadotBulletin).into()]), - Some(bp_messages::LegacyLaneId([0, 0, 0, 0])), - )], + xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { + opened_bridges, + ..Default::default() + }, + ethereum_system: EthereumSystemConfig { + para_id: id, + asset_hub_para_id, + ..Default::default() }, - xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { opened_bridges }, - ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, - }) + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Provides the JSON representation of predefined genesis config for given `id`. 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 88146cecb9ef..ff7af475f5e2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -169,6 +169,7 @@ pub type Migrations = ( bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >, bridge_to_westend_config::migration::StaticToDynamicLanes, + bridge_to_bulletin_config::migration::StaticToDynamicLanes, frame_support::migrations::RemoveStorage< BridgeWestendMessagesPalletName, OutboundLanesCongestedSignalsKey, @@ -240,7 +241,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("bridge-hub-rococo"), impl_name: alloc::borrow::Cow::Borrowed("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -536,7 +537,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -846,8 +846,7 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1498,8 +1497,18 @@ impl_runtime_apis! 
{ } } - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index 0a085b858251..a732e1a57343 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=bridge-hub-rococo-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,16 +64,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_273_000 picoseconds. - Weight::from_parts(25_810_000, 0) + // Minimum execution time: 18_513_000 picoseconds. 
+ Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -94,10 +90,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 112_156_000 picoseconds. - Weight::from_parts(115_999_000, 0) + // Minimum execution time: 88_096_000 picoseconds. 
+ Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -112,8 +108,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -132,22 +126,21 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 110_987_000 picoseconds. - Weight::from_parts(114_735_000, 0) + // Minimum execution time: 88_239_000 picoseconds. + Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 12_068_000 picoseconds. 
- Weight::from_parts(12_565_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -155,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_155_000 picoseconds. - Weight::from_parts(7_606_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -166,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_325_000 picoseconds. - Weight::from_parts(2_442_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -193,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 31_747_000 picoseconds. - Weight::from_parts(33_122_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -219,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 36_396_000 picoseconds. 
- Weight::from_parts(37_638_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -231,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_470_000 picoseconds. - Weight::from_parts(2_594_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 22_530_000 picoseconds. - Weight::from_parts(22_987_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 17_718_000 picoseconds. + Weight::from_parts(18_208_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 23_016_000 picoseconds. 
- Weight::from_parts(23_461_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 26_216_000 picoseconds. - Weight::from_parts(26_832_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -289,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 31_060_000 picoseconds. - Weight::from_parts(32_513_000, 0) + // Minimum execution time: 24_958_000 picoseconds. 
+ Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 17_334_000 picoseconds. - Weight::from_parts(17_747_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 22_535_000 picoseconds. - Weight::from_parts(23_386_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 17_844_000 picoseconds. 
+ Weight::from_parts(18_266_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -335,11 +328,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 43_437_000 picoseconds. - Weight::from_parts(44_588_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(12)) + // Estimated: `13471` + // Minimum execution time: 34_131_000 picoseconds. + Weight::from_parts(34_766_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -350,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_941_000 picoseconds. - Weight::from_parts(5_088_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -362,24 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 29_996_000 picoseconds. - Weight::from_parts(30_700_000, 0) + // Minimum execution time: 24_975_000 picoseconds. 
+ Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_828_000 picoseconds. - Weight::from_parts(43_026_000, 0) + // Minimum execution time: 33_761_000 picoseconds. + Weight::from_parts(34_674_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index efc2798999bf..60a0fc005ca1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,6 @@ use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -85,11 +84,7 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn 
transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -258,18 +253,7 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. } => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight - } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index daf22190a42b..b8bd4c4e2d44 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -373,18 +373,11 @@ impl WeightInfo { // Minimum execution time: 1_085_000 picoseconds. Weight::from_parts(1_161_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 707_000 picoseconds. Weight::from_parts(749_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. 
- Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index b37945317f6c..d36075444f7b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -57,7 +57,6 @@ use xcm_executor::{ }; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const TokenLocation: Location = Location::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); @@ -165,7 +164,6 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index d5baa1c71dfd..8be2993c68f4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -29,7 +29,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -166,7 +166,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::Sr25519Keyring, + sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -192,7 
+192,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - origin: sp_keyring::Sr25519Keyring, + origin: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 8d74b221a609..2e7dd98e9dce 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -20,9 +20,9 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, - AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, - ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, - RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -31,7 +31,7 @@ use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_core::ChannelId; use sp_consensus_aura::SlotDuration; use sp_core::{crypto::Ss58Codec, H160}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, @@ -45,7 +45,7 @@ parameter_types! 
{ } fn construct_extrinsic( - sender: sp_keyring::Sr25519Keyring, + sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -72,7 +72,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::Sr25519Keyring, + relayer_at_target: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -324,12 +324,11 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -389,12 +388,11 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -424,12 +422,11 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -501,10 +498,10 @@ mod bridge_hub_westend_tests { mod bridge_hub_bulletin_tests { use super::*; - use bp_messages::LegacyLaneId; + use bp_messages::{HashedLaneId, LaneIdType}; use bridge_common_config::BridgeGrandpaRococoBulletinInstance; use bridge_hub_rococo_runtime::{ - bridge_common_config::RelayersForLegacyLaneIdsMessagesInstance, + bridge_common_config::RelayersForPermissionlessLanesInstance, 
xcm_config::LocationToAccountId, }; use bridge_hub_test_utils::test_cases::from_grandpa_chain; @@ -528,7 +525,7 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, - RelayersForLegacyLaneIdsMessagesInstance, + RelayersForPermissionlessLanesInstance, >; #[test] @@ -594,12 +591,11 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance - >(locations, LegacyLaneId([0, 0, 0, 0])) + >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) } ).1 }, @@ -658,12 +654,11 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, LegacyLaneId([0, 0, 0, 0])) + >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) }, ) .1 @@ -692,12 +687,11 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, LegacyLaneId([0, 0, 0, 0])) + >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) }, ) .1 @@ -838,13 +832,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 644aa72d1311..637e7c710640 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -34,9 +32,9 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } +pallet-session = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } -pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } @@ -45,10 +43,10 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } +sp-keyring = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -58,11 +56,11 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot +westend-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -westend-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -76,8 +74,8 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = 
true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } @@ -94,36 +92,34 @@ bp-messages = { workspace = true } bp-parachains = { workspace = true } bp-polkadot-core = { workspace = true } bp-relayers = { workspace = true } -bp-rococo = { workspace = true } bp-runtime = { workspace = true } +bp-rococo = { workspace = true } bp-westend = { workspace = true } -bp-xcm-bridge-hub-router = { workspace = true } -bridge-hub-common = { workspace = true } -bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } +bridge-hub-common = { workspace = true } # Ethereum Bridge (Snowbridge) snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } snowbridge-core = { workspace = true } -snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-pallet-ethereum-client = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-pallet-system = { workspace = true } +snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } -snowbridge-system-runtime-api = { workspace = true } [dev-dependencies] bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } 
pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } -parachains-runtimes-test-utils = { workspace = true, default-features = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] @@ -141,7 +137,6 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", - "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -263,7 +258,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 24e5482b7b09..62c93da7c831 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -21,7 +21,7 @@ use crate::{ weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, XcmpQueue, + RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, @@ -29,7 +29,7 @@ use bp_messages::{ }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; +use pallet_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::{ parameter_types, @@ -152,7 +152,6 @@ impl pallet_bridge_messages::Config for Run type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, - RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ 
-186,46 +185,11 @@ impl pallet_xcm_bridge_hub::Config for Runtime { type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - type LocalXcmChannelManager = CongestionManager; + // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - https://github.com/paritytech/parity-bridges-common/issues/3047 + type LocalXcmChannelManager = (); type BlobDispatcher = FromRococoMessageBlobDispatcher; } -/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. -pub struct CongestionManager; -impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { - type Error = SendError; - - fn is_congested(with: &Location) -> bool { - // This is used to check the inbound bridge queue/messages to determine if they can be - // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` - // is sufficient here. - use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; - cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( - with, - ) - } - - fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - // This bridge is intended for AH<>AH communication with a hard-coded/static lane, - // so `local_origin` is expected to represent only the local AH. - send_xcm::( - local_origin.clone(), - bp_asset_hub_westend::build_congestion_message(bridge.inner(), true).into(), - ) - .map(|_| ()) - } - - fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { - // This bridge is intended for AH<>AH communication with a hard-coded/static lane, - // so `local_origin` is expected to represent only the local AH. 
- send_xcm::( - local_origin.clone(), - bp_asset_hub_westend::build_congestion_message(bridge.inner(), false).into(), - ) - .map(|_| ()) - } -} - #[cfg(feature = "runtime-benchmarks")] pub(crate) fn open_bridge_for_benchmarks( with: pallet_xcm_bridge_hub::LaneIdOf, @@ -320,6 +284,7 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, + with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, this_chain: bp_bridge_hub_westend::BridgeHubWestend, bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, @@ -329,6 +294,7 @@ mod tests { Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, + bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs index 69ba9ca9ece7..421c36246774 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs @@ -18,7 +18,6 @@ use crate::*; use alloc::{vec, vec::Vec}; use cumulus_primitives_core::ParaId; -use frame_support::build_struct_json_patch; use parachains_common::{AccountId, AuraId}; use sp_genesis_builder::PresetId; use sp_keyring::Sr25519Keyring; @@ -35,7 +34,7 @@ fn bridge_hub_westend_genesis( asset_hub_para_id: ParaId, opened_bridges: Vec<(Location, InteriorLocation, Option)>, ) -> serde_json::Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -43,10 +42,11 @@ fn bridge_hub_westend_genesis( .map(|k| (k, 1u128 << 60)) .collect::>(), }, - parachain_info: 
ParachainInfoConfig { parachain_id: id }, + parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: BRIDGE_HUB_WESTEND_ED * 16, + ..Default::default() }, session: SessionConfig { keys: invulnerables @@ -59,13 +59,33 @@ fn bridge_hub_westend_genesis( ) }) .collect(), + ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, - bridge_rococo_grandpa: BridgeRococoGrandpaConfig { owner: bridges_pallet_owner.clone() }, - bridge_rococo_messages: BridgeRococoMessagesConfig { owner: bridges_pallet_owner.clone() }, - xcm_over_bridge_hub_rococo: XcmOverBridgeHubRococoConfig { opened_bridges }, - ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, - }) + polkadot_xcm: PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_rococo_grandpa: BridgeRococoGrandpaConfig { + owner: bridges_pallet_owner.clone(), + ..Default::default() + }, + bridge_rococo_messages: BridgeRococoMessagesConfig { + owner: bridges_pallet_owner.clone(), + ..Default::default() + }, + xcm_over_bridge_hub_rococo: XcmOverBridgeHubRococoConfig { + opened_bridges, + ..Default::default() + }, + ethereum_system: EthereumSystemConfig { + para_id: id, + asset_hub_para_id, + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Provides the JSON representation of predefined genesis config for given `id`. 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 1ca709f0d8cb..065400016791 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -226,7 +226,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("bridge-hub-westend"), impl_name: alloc::borrow::Cow::Borrowed("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -513,7 +513,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -779,8 +778,7 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1315,8 +1313,18 @@ impl_runtime_apis! 
{ } } - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index fdae0c9a1522..a78ff2355efa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `27f89d982f9b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=bridge-hub-westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,16 +64,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_819_000 picoseconds. - Weight::from_parts(25_795_000, 0) + // Minimum execution time: 19_527_000 picoseconds. 
+ Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -94,10 +90,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 110_536_000 picoseconds. - Weight::from_parts(115_459_000, 0) + // Minimum execution time: 90_938_000 picoseconds. 
+ Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -112,8 +108,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -132,22 +126,21 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 109_742_000 picoseconds. - Weight::from_parts(114_362_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 12_252_000 picoseconds. 
- Weight::from_parts(12_681_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -155,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_988_000 picoseconds. - Weight::from_parts(7_161_000, 0) + // Minimum execution time: 6_205_000 picoseconds. + Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -166,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_249_000 picoseconds. - Weight::from_parts(2_479_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -193,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 31_668_000 picoseconds. - Weight::from_parts(32_129_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -219,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 36_002_000 picoseconds. 
- Weight::from_parts(37_341_000, 0) + // Minimum execution time: 28_188_000 picoseconds. + Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -231,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_349_000 picoseconds. - Weight::from_parts(2_511_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 22_283_000 picoseconds. - Weight::from_parts(22_654_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(17_964_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 22_717_000 picoseconds. 
- Weight::from_parts(23_256_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(18_006_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 25_988_000 picoseconds. - Weight::from_parts(26_794_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -289,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 31_112_000 picoseconds. - Weight::from_parts(32_395_000, 0) + // Minimum execution time: 25_517_000 picoseconds. 
+ Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 17_401_000 picoseconds. - Weight::from_parts(17_782_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 22_772_000 picoseconds. - Weight::from_parts(23_194_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 17_490_000 picoseconds. 
+ Weight::from_parts(18_160_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -335,11 +328,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 43_571_000 picoseconds. - Weight::from_parts(44_891_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(12)) + // Estimated: `13471` + // Minimum execution time: 34_088_000 picoseconds. + Weight::from_parts(34_598_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -350,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_896_000 picoseconds. - Weight::from_parts(5_112_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -362,24 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 30_117_000 picoseconds. - Weight::from_parts(31_027_000, 0) + // Minimum execution time: 25_078_000 picoseconds. 
+ Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_870_000 picoseconds. - Weight::from_parts(42_750_000, 0) + // Minimum execution time: 34_661_000 picoseconds. + Weight::from_parts(35_411_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 15a1dae09d9b..473807ea5eb1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -23,7 +23,6 @@ use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -86,11 +85,7 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn 
transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -179,16 +174,8 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. } => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() @@ -270,7 +257,4 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 03cbaa866ad8..849456af9255 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -373,18 +373,11 @@ impl WeightInfo { // Minimum execution time: 995_000 picoseconds. Weight::from_parts(1_060_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 707_000 picoseconds. Weight::from_parts(749_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. 
- Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index befb63ef9709..e692568932fe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -56,7 +56,6 @@ use xcm_executor::{ }; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const WestendLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -162,7 +161,6 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs index d71400fa71b6..1a1ce2a28ea3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs @@ -30,7 +30,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -167,7 +167,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::Sr25519Keyring, + sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = 
AccountId32::from(sender.public()); @@ -193,7 +193,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - origin: sp_keyring::Sr25519Keyring, + origin: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 9d32f28f4fc6..69301b34fe6b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -27,9 +27,9 @@ use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, bridge_to_rococo_config::RococoGlobalConsensusNetwork, xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, - AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, - ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, - RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -40,7 +40,7 @@ use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8} use parachains_common::{AccountId, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_core::crypto::Ss58Codec; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, @@ -77,7 +77,7 @@ parameter_types! 
{ } fn construct_extrinsic( - sender: sp_keyring::Sr25519Keyring, + sender: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); @@ -104,7 +104,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::Sr25519Keyring, + relayer_at_target: sp_keyring::AccountKeyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -246,11 +246,10 @@ fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -308,12 +307,11 @@ fn relayed_incoming_message_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -343,12 +341,11 @@ fn free_relay_extrinsic_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - false, - |locations, _fee| { + |locations, fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, LegacyLaneId([0, 0, 0, 1])) + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -525,13 +522,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 2fbb96d75163..9cb24a2b2820 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -5,20 +5,18 @@ authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -cumulus-primitives-core = { workspace = true } -frame-support = { workspace = true } -pallet-message-queue = { workspace = true } scale-info = { features = ["derive"], workspace = true } -snowbridge-core = { workspace = true } +frame-support = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +cumulus-primitives-core = { workspace = true } xcm = { workspace = true } +pallet-message-queue = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] @@ -41,5 +39,4 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index ace23e71c4d1..915b3090092f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,14 +17,14 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } -pallet-balances = { workspace = true } -pallet-timestamp 
= { workspace = true } -pallet-utility = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true } +pallet-utility = { workspace = true } +pallet-timestamp = { workspace = true } # Cumulus asset-test-utils = { workspace = true, default-features = true } @@ -36,10 +34,10 @@ parachains-common = { workspace = true } parachains-runtimes-test-utils = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } # Bridges bp-header-chain = { workspace = true } @@ -50,12 +48,12 @@ bp-relayers = { workspace = true } bp-runtime = { workspace = true } bp-test-utils = { workspace = true } bp-xcm-bridge-hub = { workspace = true } -bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } -pallet-bridge-messages = { features = ["test-helpers"], workspace = true } pallet-bridge-parachains = { workspace = true } +pallet-bridge-messages = { features = ["test-helpers"], workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 358c184c815d..320f3030b60a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -34,7 +34,7 @@ use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, 
}; use sp_core::Get; -use sp_keyring::Sr25519Keyring::*; +use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -103,7 +103,7 @@ pub fn relayed_incoming_message_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -210,7 +210,7 @@ pub fn free_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -344,7 +344,7 @@ pub fn complex_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index d8fff55b4b50..1da901e0bcdf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -36,7 +36,7 @@ use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::Sr25519Keyring::*; +use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -112,7 +112,7 @@ pub fn relayed_incoming_message_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - 
sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -246,7 +246,7 @@ pub fn free_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, expect_rewards: bool, @@ -414,7 +414,7 @@ pub fn complex_relay_extrinsic_works( local_relay_chain_id: NetworkId, prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index a99bda5bfdf4..aac60bba0b53 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -29,7 +29,7 @@ use core::marker::PhantomData; use frame_support::{ assert_ok, dispatch::GetDispatchInfo, - traits::{fungible::Mutate, Contains, OnFinalize, OnInitialize, PalletInfoAccess}, + traits::{fungible::Mutate, OnFinalize, OnInitialize, PalletInfoAccess}, }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; @@ -39,7 +39,7 @@ use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::Sr25519Keyring::*; +use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::TrailingZeroInput, AccountId32}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -264,7 +264,7 @@ pub fn relayed_incoming_message_works( sibling_parachain_id: u32, local_relay_chain_id: NetworkId, construct_and_apply_extrinsic: fn( - 
sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, prepare_message_proof_import: impl FnOnce( @@ -374,9 +374,9 @@ pub fn relayed_incoming_message_works( /// Execute every call and verify its outcome. fn execute_and_verify_calls( - submitter: sp_keyring::Sr25519Keyring, + submitter: sp_keyring::AccountKeyring, construct_and_apply_extrinsic: fn( - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, calls_and_verifiers: CallsAndVerifiers, @@ -395,7 +395,7 @@ pub fn ensure_opened_bridge< XcmOverBridgePalletInstance, LocationToAccountId, TokenLocation> -(source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(BridgeLocations, Option)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) +(source: Location, destination: InteriorLocation, bridge_opener: impl Fn(BridgeLocations, Asset)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -416,37 +416,24 @@ TokenLocation: Get{ ) .is_none()); + // required balance: ED + fee + BridgeDeposit + let bridge_deposit = + >::BridgeDeposit::get( + ); + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 5_000_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into() + + bridge_deposit.into(); + // SA of source location needs to have some required balance - if !>::AllowWithoutBridgeDeposit::contains(&source) { - // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); - - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = 
>::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - }; - - let maybe_paid_execution = if is_paid_xcm_execution { - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 5_000_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into(); - let source_account_id = - LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - Some(buy_execution_fee) - } else { - None - }; + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); // call the bridge opener - bridge_opener(*locations.clone(), maybe_paid_execution); + bridge_opener(*locations.clone(), buy_execution_fee); // check opened bridge let bridge = pallet_xcm_bridge_hub::Bridges::::get( @@ -465,9 +452,8 @@ TokenLocation: Get{ /// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. 
pub fn open_bridge_with_extrinsic( - (origin, origin_kind): (Location, OriginKind), - bridge_destination_universal_location: InteriorLocation, - maybe_paid_execution: Option, + locations: BridgeLocations, + buy_execution_fee: Asset, ) where Runtime: frame_system::Config + pallet_xcm_bridge_hub::Config @@ -483,15 +469,15 @@ pub fn open_bridge_with_extrinsic( XcmOverBridgePalletInstance, >::open_bridge { bridge_destination_universal_location: Box::new( - bridge_destination_universal_location.clone().into(), + locations.bridge_destination_universal_location().clone().into(), ), }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin( - (origin, origin_kind), + assert_ok!(RuntimeHelper::::execute_as_origin_xcm( + locations.bridge_origin_relative_location().clone(), open_bridge_call, - maybe_paid_execution + buy_execution_fee ) .ensure_complete()); } @@ -500,6 +486,7 @@ pub fn open_bridge_with_extrinsic( /// purposes). pub fn open_bridge_with_storage( locations: BridgeLocations, + _buy_execution_fee: Asset, lane_id: pallet_xcm_bridge_hub::LaneIdOf, ) where Runtime: pallet_xcm_bridge_hub::Config, @@ -516,12 +503,8 @@ pub fn open_bridge_with_storage( } /// Helper function to close the bridge/lane for `source` and `destination`. 
-pub fn close_bridge( - expected_source: Location, - bridge_destination_universal_location: InteriorLocation, - (origin, origin_kind): (Location, OriginKind), - is_paid_xcm_execution: bool -) where +pub fn close_bridge(source: Location, destination: InteriorLocation) +where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, ::RuntimeCall: GetDispatchInfo + From>, @@ -532,8 +515,8 @@ TokenLocation: Get{ // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - expected_source.clone().into(), - bridge_destination_universal_location.clone().into(), + source.clone().into(), + destination.clone().into(), ) .expect("valid bridge locations"); assert!(pallet_xcm_bridge_hub::Bridges::::get( @@ -542,38 +525,35 @@ TokenLocation: Get{ .is_some()); // required balance: ED + fee + BridgeDeposit - let maybe_paid_execution = if is_paid_xcm_execution { - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 2_500_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into(); - let source_account_id = - LocationToAccountId::convert_location(&expected_source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - Some(buy_execution_fee) - } else { - None - }; + let bridge_deposit = + >::BridgeDeposit::get( + ); + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 2_500_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into() + + bridge_deposit.into(); + + // SA of source location needs to have some required balance + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid 
location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); // close bridge with `Transact` call let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< Runtime, XcmOverBridgePalletInstance, >::close_bridge { - bridge_destination_universal_location: Box::new( - bridge_destination_universal_location.into(), - ), + bridge_destination_universal_location: Box::new(destination.into()), may_prune_messages: 16, }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin( - (origin, origin_kind), + assert_ok!(RuntimeHelper::::execute_as_origin_xcm( + source.clone(), close_bridge_call, - maybe_paid_execution + buy_execution_fee ) .ensure_complete()); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index f96d0bf405b9..ad6db0b83e80 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -654,10 +654,8 @@ where pub fn open_and_close_bridge_works( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, - expected_source: Location, + source: Location, destination: InteriorLocation, - origin_with_origin_kind: (Location, OriginKind), - is_paid_xcm_execution: bool, ) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -671,7 +669,7 @@ pub fn open_and_close_bridge_works(collator_session_key, runtime_para_id, vec![], || { // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - expected_source.clone().into(), + source.clone().into(), destination.clone().into(), ).expect("valid bridge locations"); let expected_lane_id = @@ -706,7 +704,7 @@ pub fn open_and_close_bridge_works( - expected_source.clone(), + source.clone(), 
destination.clone(), - is_paid_xcm_execution, - |locations, maybe_paid_execution| open_bridge_with_extrinsic::< - Runtime, - XcmOverBridgePalletInstance, - >( - origin_with_origin_kind.clone(), - locations.bridge_destination_universal_location().clone(), - maybe_paid_execution - ) + open_bridge_with_extrinsic:: ) .0 .bridge_id(), @@ -737,7 +727,7 @@ pub fn open_and_close_bridge_works(expected_source, destination, origin_with_origin_kind, is_paid_xcm_execution); + >(source.clone(), destination); // check bridge/lane DOES not exist assert_eq!( diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 9c70b65060dd..e03fc934ceaf 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -26,19 +24,15 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-alliance = { workspace = true } pallet-asset-rate = { workspace = true } +pallet-alliance = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-collective = { workspace = true } -pallet-core-fellowship = { workspace = true } pallet-multisig = { workspace = true } pallet-preimage = { workspace = true } pallet-proxy = { workspace = true } -pallet-ranked-collective = { workspace = true } -pallet-referenda = { workspace = true } -pallet-salary = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } 
pallet-state-trie-migration = { workspace = true } @@ -47,14 +41,18 @@ pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } +pallet-referenda = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-core-fellowship = { workspace = true } +pallet-salary = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } +sp-keyring = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -67,23 +65,23 @@ sp-version = { workspace = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -westend-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +westend-runtime-constants = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } -pallet-message-queue = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } pallet-collective-content = 
{ workspace = true } @@ -95,7 +93,6 @@ testnet-parachains-constants = { features = ["westend"], workspace = true } substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] -parachains-runtimes-test-utils = { workspace = true, default-features = true } sp-io = { features = ["std"], workspace = true, default-features = true } [features] @@ -138,7 +135,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs index 007ff6164a74..77e971ff8ad7 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs @@ -18,7 +18,6 @@ use crate::*; use alloc::{vec, vec::Vec}; use cumulus_primitives_core::ParaId; -use frame_support::build_struct_json_patch; use parachains_common::{AccountId, AuraId}; use sp_genesis_builder::PresetId; use sp_keyring::Sr25519Keyring; @@ -31,7 +30,7 @@ fn collectives_westend_genesis( endowed_accounts: Vec, id: ParaId, ) -> serde_json::Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -39,10 +38,11 @@ fn collectives_westend_genesis( .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) .collect::>(), }, - parachain_info: ParachainInfoConfig { parachain_id: id }, + parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: COLLECTIVES_WESTEND_ED * 16, + 
..Default::default() }, session: SessionConfig { keys: invulnerables @@ -55,9 +55,16 @@ fn collectives_westend_genesis( ) }) .collect(), + ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, - }) + polkadot_xcm: PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Provides the JSON representation of predefined genesis config for given `id`. diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index d3cd285ba67a..c3e105a84fb6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("collectives-westend"), impl_name: alloc::borrow::Cow::Borrowed("collectives-westend"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -258,7 +258,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -383,7 +382,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -963,8 +961,7 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1139,8 +1136,18 @@ impl_runtime_apis! { } } - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index ccf88873c2cd..5d427d850046 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `47a5bbdc8de3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=collectives-westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,8 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -66,18 +62,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `214` - // Estimated: `3679` - // Minimum execution time: 32_779_000 picoseconds. 
- Weight::from_parts(33_417_000, 0) - .saturating_add(Weight::from_parts(0, 3679)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -96,10 +90,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 116_031_000 picoseconds. - Weight::from_parts(118_863_000, 0) + // Minimum execution time: 93_243_000 picoseconds. 
+ Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -114,8 +108,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -134,22 +126,21 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 116_267_000 picoseconds. - Weight::from_parts(119_519_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 12_718_000 picoseconds. 
- Weight::from_parts(13_572_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -157,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_568_000 picoseconds. - Weight::from_parts(7_913_000, 0) + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -168,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_225_000 picoseconds. - Weight::from_parts(2_473_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -195,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 35_869_000 picoseconds. - Weight::from_parts(37_848_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -221,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 38_649_000 picoseconds. 
- Weight::from_parts(39_842_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -233,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_223_000 picoseconds. - Weight::from_parts(2_483_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `15999` - // Minimum execution time: 24_164_000 picoseconds. - Weight::from_parts(24_972_000, 0) - .saturating_add(Weight::from_parts(0, 15999)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13524` + // Minimum execution time: 18_348_000 picoseconds. + Weight::from_parts(18_853_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `16003` - // Minimum execution time: 24_604_000 picoseconds. 
- Weight::from_parts(25_047_000, 0) - .saturating_add(Weight::from_parts(0, 16003)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13528` + // Minimum execution time: 17_964_000 picoseconds. + Weight::from_parts(18_548_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `18488` - // Minimum execution time: 28_088_000 picoseconds. - Weight::from_parts(28_431_000, 0) - .saturating_add(Weight::from_parts(0, 18488)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `16013` + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -291,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 33_814_000 picoseconds. - Weight::from_parts(34_741_000, 0) + // Minimum execution time: 26_632_000 picoseconds. 
+ Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `13541` - // Minimum execution time: 18_242_000 picoseconds. - Weight::from_parts(18_636_000, 0) - .saturating_add(Weight::from_parts(0, 13541)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `206` + // Estimated: `11096` + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `16010` - // Minimum execution time: 24_249_000 picoseconds. - Weight::from_parts(24_768_000, 0) - .saturating_add(Weight::from_parts(0, 16010)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13535` + // Minimum execution time: 18_599_000 picoseconds. 
+ Weight::from_parts(19_195_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -337,11 +328,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `16052` - // Minimum execution time: 47_602_000 picoseconds. - Weight::from_parts(48_378_000, 0) - .saturating_add(Weight::from_parts(0, 16052)) - .saturating_add(T::DbWeight::get().reads(12)) + // Estimated: `13577` + // Minimum execution time: 35_524_000 picoseconds. + Weight::from_parts(36_272_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -352,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_566_000 picoseconds. - Weight::from_parts(5_768_000, 0) + // Minimum execution time: 4_044_000 picoseconds. + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -364,24 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 30_821_000 picoseconds. - Weight::from_parts(31_250_000, 0) + // Minimum execution time: 25_741_000 picoseconds. 
+ Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 43_463_000 picoseconds. - Weight::from_parts(44_960_000, 0) + // Minimum execution time: 35_925_000 picoseconds. + Weight::from_parts(36_978_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 9eb9b85a3918..56ef2e8ba02f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -35,8 +35,7 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use westend_runtime_constants::xcm as xcm_constants; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, - AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, 
FungibleAdapter, @@ -192,10 +191,6 @@ pub type WaivedLocations = ( /// - DOT with the parent Relay Chain and sibling parachains. pub type TrustedTeleporters = ConcreteAssetFromSystem; -/// We allow locations to alias into their own child locations, as well as -/// AssetHub to alias into anything. -pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter); - pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -232,7 +227,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Aliasers; + type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs index c9191eba49f6..7add10559d84 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs @@ -16,9 +16,7 @@ #![cfg(test)] -use collectives_westend_runtime::{ - xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, -}; +use collectives_westend_runtime::xcm_config::LocationToAccountId; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -134,13 +132,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index 01b023e0fb89..d54f1e7db6c1 100644 --- 
a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Common constants for Testnet Parachains runtimes" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index cb0655d70cf2..c98ca7ba3e74 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -5,8 +5,6 @@ description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -24,37 +22,37 @@ log = { workspace = true } scale-info = { features = ["derive"], workspace = true } # Substrate +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } frame-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } -frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } -pallet-balances = { workspace = true } 
-pallet-contracts = { workspace = true } pallet-insecure-randomness-collective-flip = { workspace = true } +pallet-balances = { workspace = true } pallet-multisig = { workspace = true } pallet-session = { workspace = true } -pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-utility = { workspace = true } -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true } -sp-storage = { workspace = true } -sp-transaction-pool = { workspace = true } -sp-version = { workspace = true } +pallet-sudo = { workspace = true } +pallet-contracts = { workspace = true } # Polkadot pallet-xcm = { workspace = true } @@ -68,15 +66,15 @@ xcm-runtime-apis = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } -pallet-message-queue = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } @@ -173,7 +171,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff 
--git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index be369565dba9..f661a8bdccfe 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -144,7 +144,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("contracts-rococo"), impl_name: alloc::borrow::Cow::Borrowed("contracts-rococo"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, @@ -268,7 +268,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -849,8 +848,18 @@ impl_runtime_apis! { } } - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs 
b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 532ad4ff4ce0..0151837aa351 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -51,7 +51,6 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -167,7 +166,6 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 2b5fab329293..a38b7400cfa3 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -33,8 +31,8 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-broker = { workspace = true } pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -47,8 +45,8 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { 
workspace = true } -sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -75,16 +73,13 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } -[dev-dependencies] -parachains-runtimes-test-utils = { workspace = true } - [features] default = ["std"] std = [ @@ -125,7 +120,6 @@ std = [ "pallet-xcm/std", "parachain-info/std", "parachains-common/std", - "parachains-runtimes-test-utils/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "rococo-runtime-constants/std", @@ -180,7 +174,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index 35c3dd8836a8..d76ac443a147 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -135,7 +135,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_core_count_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -165,7 +164,6 @@ impl 
CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_revenue_info_at_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -194,7 +192,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: credit_account_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -259,7 +256,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: assign_core_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index c4d43e4361fa..a4ff48bfc0a0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -68,7 +68,7 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, BlockNumberProvider}, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; @@ -124,7 +124,6 @@ pub type Migrations = ( pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, pallet_broker::migration::MigrateV2ToV3, - pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -150,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("coretime-rococo"), impl_name: alloc::borrow::Cow::Borrowed("coretime-rococo"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, 
apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -445,7 +444,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -578,7 +576,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -594,25 +591,6 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = pallet_sudo::weights::SubstrateWeight; } -pub struct BrokerMigrationV4BlockConversion; - -impl pallet_broker::migration::v4::BlockToRelayHeightConversion - for BrokerMigrationV4BlockConversion -{ - fn convert_block_number_to_relay_height(input_block_number: u32) -> u32 { - let relay_height = pallet_broker::RCBlockNumberProviderOf::< - ::Coretime, - >::current_block_number(); - let parachain_block_number = frame_system::Pallet::::block_number(); - let offset = relay_height - parachain_block_number * 2; - offset + input_block_number * 2 - } - - fn convert_block_length_to_relay_length(input_block_length: u32) -> u32 { - input_block_length * 2 - } -} - // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -835,8 +813,7 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1140,8 +1117,18 @@ impl_runtime_apis! 
{ type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 3e4bbf379c3f..35708f22de20 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_250_000 picoseconds. - Weight::from_parts(2_419_000, 0) + // Minimum execution time: 2_024_000 picoseconds. + Weight::from_parts(2_121_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 25_785_000 picoseconds. - Weight::from_parts(26_335_000, 0) + // Minimum execution time: 21_654_000 picoseconds. + Weight::from_parts(22_591_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 24_549_000 picoseconds. - Weight::from_parts(25_010_000, 0) + // Minimum execution time: 20_769_000 picoseconds. + Weight::from_parts(21_328_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 14_135_000 picoseconds. - Weight::from_parts(14_603_000, 0) + // Minimum execution time: 10_404_000 picoseconds. 
+ Weight::from_parts(10_941_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -121,8 +121,6 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -134,33 +132,31 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12599` // Estimated: `15065 + n * (1 ±0)` - // Minimum execution time: 54_087_000 picoseconds. - Weight::from_parts(145_466_213, 0) + // Minimum execution time: 44_085_000 picoseconds. 
+ Weight::from_parts(127_668_002, 0) .saturating_add(Weight::from_parts(0, 15065)) - // Standard Error: 2_407 - .saturating_add(Weight::from_parts(20_971, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(60)) + // Standard Error: 2_231 + .saturating_add(Weight::from_parts(20_604, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(59)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 58_341_000 picoseconds. - Weight::from_parts(59_505_000, 0) + // Minimum execution time: 45_100_000 picoseconds. 
+ Weight::from_parts(46_263_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -173,18 +169,16 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `658` + // Measured: `553` // Estimated: `4698` - // Minimum execution time: 92_983_000 picoseconds. - Weight::from_parts(99_237_000, 0) + // Minimum execution time: 65_944_000 picoseconds. + Weight::from_parts(68_666_000, 0) .saturating_add(Weight::from_parts(0, 4698)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -193,8 +187,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 17_512_000 picoseconds. - Weight::from_parts(18_099_000, 0) + // Minimum execution time: 13_794_000 picoseconds. 
+ Weight::from_parts(14_450_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -205,8 +199,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 18_715_000 picoseconds. - Weight::from_parts(19_768_000, 0) + // Minimum execution time: 15_316_000 picoseconds. + Weight::from_parts(15_787_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -217,8 +211,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 20_349_000 picoseconds. - Weight::from_parts(21_050_000, 0) + // Minimum execution time: 16_375_000 picoseconds. + Weight::from_parts(17_113_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -235,8 +229,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `937` // Estimated: `4681` - // Minimum execution time: 31_876_000 picoseconds. - Weight::from_parts(33_536_000, 0) + // Minimum execution time: 25_952_000 picoseconds. + Weight::from_parts(27_198_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -255,8 +249,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1003` // Estimated: `5996` - // Minimum execution time: 39_500_000 picoseconds. - Weight::from_parts(40_666_000, 0) + // Minimum execution time: 31_790_000 picoseconds. 
+ Weight::from_parts(32_920_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -270,13 +264,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `671` + // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 65_843_000 picoseconds. - Weight::from_parts(65_768_512, 0) + // Minimum execution time: 56_286_000 picoseconds. + Weight::from_parts(56_946_240, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 40_994 - .saturating_add(Weight::from_parts(2_084_877, 0).saturating_mul(m.into())) + // Standard Error: 44_472 + .saturating_add(Weight::from_parts(1_684_838, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -296,11 +290,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `3788` - // Minimum execution time: 73_250_000 picoseconds. - Weight::from_parts(75_059_000, 0) - .saturating_add(Weight::from_parts(0, 3788)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 64_967_000 picoseconds. + Weight::from_parts(66_504_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -312,8 +306,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `3551` - // Minimum execution time: 55_088_000 picoseconds. 
- Weight::from_parts(65_329_000, 0) + // Minimum execution time: 37_552_000 picoseconds. + Weight::from_parts(46_263_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -328,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 102_280_000 picoseconds. - Weight::from_parts(130_319_000, 0) + // Minimum execution time: 79_625_000 picoseconds. + Weight::from_parts(86_227_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -344,10 +338,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `979` + // Measured: `857` // Estimated: `3593` - // Minimum execution time: 78_195_000 picoseconds. - Weight::from_parts(105_946_000, 0) + // Minimum execution time: 88_005_000 picoseconds. + Weight::from_parts(92_984_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -360,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 41_642_000 picoseconds. - Weight::from_parts(48_286_000, 0) + // Minimum execution time: 38_877_000 picoseconds. 
+ Weight::from_parts(40_408_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -377,13 +371,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 23_727_000 picoseconds. - Weight::from_parts(25_029_439, 0) + // Minimum execution time: 20_581_000 picoseconds. + Weight::from_parts(21_610_297, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 119 + .saturating_add(Weight::from_parts(144, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -394,11 +390,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 7_887_000 picoseconds. - Weight::from_parts(8_477_863, 0) + // Minimum execution time: 6_079_000 picoseconds. 
+ Weight::from_parts(6_540_110, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(10, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -410,50 +406,36 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 52_505_000 picoseconds. - Weight::from_parts(53_392_000, 0) + // Minimum execution time: 42_947_000 picoseconds. + Weight::from_parts(43_767_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::Reservations` (r:1 w:0) /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) - /// Storage: `Broker::Configuration` (r:1 w:0) - /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) 
- /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::PotentialRenewals` (r:100 w:200) - /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:101 w:101) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Storage: `Broker::Workplan` (r:0 w:60) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32497` - // Estimated: `233641 + n * (198 ±9)` - // Minimum execution time: 28_834_000 picoseconds. - Weight::from_parts(2_467_159_777, 0) - .saturating_add(Weight::from_parts(0, 233641)) - // Standard Error: 149_483 - .saturating_add(Weight::from_parts(4_045_956, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(126)) - .saturating_add(T::DbWeight::get().writes(181)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 198).saturating_mul(n.into())) + // Measured: `12514` + // Estimated: `13506` + // Minimum execution time: 93_426_000 picoseconds. 
+ Weight::from_parts(96_185_447, 0) + .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 116 + .saturating_add(Weight::from_parts(4, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(65)) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -463,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 7_689_000 picoseconds. - Weight::from_parts(7_988_000, 0) + // Minimum execution time: 5_842_000 picoseconds. + Weight::from_parts(6_077_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -487,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 37_394_000 picoseconds. - Weight::from_parts(38_379_000, 0) + // Minimum execution time: 33_278_000 picoseconds. + Weight::from_parts(34_076_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -507,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_203_000 picoseconds. - Weight::from_parts(19_797_000, 0) + // Minimum execution time: 15_779_000 picoseconds. + Weight::from_parts(16_213_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -519,8 +501,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_129_000 picoseconds. 
- Weight::from_parts(2_266_000, 0) + // Minimum execution time: 1_774_000 picoseconds. + Weight::from_parts(1_873_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -530,8 +512,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_233_000 picoseconds. - Weight::from_parts(2_351_000, 0) + // Minimum execution time: 1_858_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -549,38 +531,20 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `408` // Estimated: `1893` - // Minimum execution time: 15_716_000 picoseconds. - Weight::from_parts(16_160_000, 0) + // Minimum execution time: 10_874_000 picoseconds. + Weight::from_parts(11_265_000, 0) .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Broker::SaleInfo` (r:1 w:0) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:1) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:2) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) - fn force_reserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `11125` - // Estimated: `13506` - // Minimum execution time: 32_286_000 picoseconds. 
- Weight::from_parts(33_830_000, 0) - .saturating_add(Weight::from_parts(0, 13506)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) fn swap_leases() -> Weight { // Proof Size summary in bytes: // Measured: `470` // Estimated: `1886` - // Minimum execution time: 8_887_000 picoseconds. - Weight::from_parts(9_178_000, 0) + // Minimum execution time: 6_525_000 picoseconds. + Weight::from_parts(6_769_000, 0) .saturating_add(Weight::from_parts(0, 1886)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -593,36 +557,36 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: 
`MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `2829` - // Estimated: `6196` - // Minimum execution time: 130_799_000 picoseconds. - Weight::from_parts(139_893_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `914` + // Estimated: `4698` + // Minimum execution time: 51_938_000 picoseconds. + Weight::from_parts(55_025_000, 4698) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `1307` - // Estimated: `2487` - // Minimum execution time: 22_945_000 picoseconds. - Weight::from_parts(24_855_000, 0) - .saturating_add(Weight::from_parts(0, 2487)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } + // Measured: `480` + // Estimated: `1516` + // Minimum execution time: 9_628_000 picoseconds. 
+ Weight::from_parts(10_400_000, 1516) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -637,11 +601,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_new_timeslice() -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `3788` - // Minimum execution time: 56_864_000 picoseconds. - Weight::from_parts(59_119_000, 0) - .saturating_add(Weight::from_parts(0, 3788)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 45_561_000 picoseconds. + Weight::from_parts(47_306_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index b2b8cd6e5349..7fb492173dad 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=coretime-rococo-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,16 +62,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 23_660_000 picoseconds. - Weight::from_parts(24_537_000, 0) + // Minimum execution time: 19_121_000 picoseconds. 
+ Weight::from_parts(19_582_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -88,20 +84,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 74_005_000 picoseconds. - Weight::from_parts(75_355_000, 0) + // Minimum execution time: 61_722_000 picoseconds. + Weight::from_parts(63_616_000, 0) .saturating_add(Weight::from_parts(0, 3571)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:1 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: 
`PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -113,17 +107,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `377` // Estimated: `3842` - // Minimum execution time: 116_231_000 picoseconds. - Weight::from_parts(121_254_000, 0) + // Minimum execution time: 97_823_000 picoseconds. 
+ Weight::from_parts(102_022_000, 0) .saturating_add(Weight::from_parts(0, 3842)) - .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -136,16 +130,13 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 11_498_000 picoseconds. - Weight::from_parts(11_867_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_397_000 picoseconds. + Weight::from_parts(8_773_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -153,8 +144,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_163_000 picoseconds. - Weight::from_parts(7_501_000, 0) + // Minimum execution time: 5_806_000 picoseconds. + Weight::from_parts(6_106_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +155,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_188_000 picoseconds. - Weight::from_parts(2_356_000, 0) + // Minimum execution time: 1_802_000 picoseconds. 
+ Weight::from_parts(1_939_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +180,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 30_503_000 picoseconds. - Weight::from_parts(31_361_000, 0) + // Minimum execution time: 24_300_000 picoseconds. + Weight::from_parts(25_359_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -213,8 +204,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 35_562_000 picoseconds. - Weight::from_parts(36_710_000, 0) + // Minimum execution time: 27_579_000 picoseconds. + Weight::from_parts(28_414_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -225,45 +216,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_223_000 picoseconds. - Weight::from_parts(2_432_000, 0) + // Minimum execution time: 1_762_000 picoseconds. + Weight::from_parts(1_884_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 21_863_000 picoseconds. 
- Weight::from_parts(22_213_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 16_512_000 picoseconds. + Weight::from_parts(16_818_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 22_044_000 picoseconds. - Weight::from_parts(22_548_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 16_368_000 picoseconds. + Weight::from_parts(16_887_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 24_336_000 picoseconds. - Weight::from_parts(25_075_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 17_661_000 picoseconds. 
+ Weight::from_parts(17_963_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -281,36 +272,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 30_160_000 picoseconds. - Weight::from_parts(30_807_000, 0) + // Minimum execution time: 24_498_000 picoseconds. + Weight::from_parts(25_339_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 16_129_000 picoseconds. - Weight::from_parts(16_686_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 10_675_000 picoseconds. + Weight::from_parts(11_106_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 21_844_000 picoseconds. 
- Weight::from_parts(22_452_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 16_520_000 picoseconds. + Weight::from_parts(16_915_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -325,11 +316,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `15982` - // Minimum execution time: 42_336_000 picoseconds. - Weight::from_parts(43_502_000, 0) - .saturating_add(Weight::from_parts(0, 15982)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `13507` + // Minimum execution time: 32_851_000 picoseconds. + Weight::from_parts(33_772_000, 0) + .saturating_add(Weight::from_parts(0, 13507)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -340,8 +331,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_682_000 picoseconds. - Weight::from_parts(4_902_000, 0) + // Minimum execution time: 3_373_000 picoseconds. 
+ Weight::from_parts(3_534_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -352,24 +343,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 27_848_000 picoseconds. - Weight::from_parts(28_267_000, 0) + // Minimum execution time: 26_027_000 picoseconds. + Weight::from_parts(26_467_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_653_000 picoseconds. - Weight::from_parts(42_316_000, 0) + // Minimum execution time: 35_692_000 picoseconds. 
+ Weight::from_parts(36_136_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs index dc21e2ea117f..48f1366e2c5f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -85,11 +84,7 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -256,18 +251,7 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. 
} => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight - } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index cdcba6134bf8..229dafb7c5ed 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -331,18 +331,11 @@ impl WeightInfo { // Minimum execution time: 650_000 picoseconds. Weight::from_parts(673_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 707_000 picoseconds. Weight::from_parts(749_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 33ad172962a1..37bf1e681447 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -52,7 +52,6 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! 
{ - pub const RootLocation: Location = Location::here(); pub const RocRelayLocation: Location = Location::parent(); pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -178,7 +177,6 @@ parameter_types! { /// Locations that will not be charged fees in the executor, neither for execution nor delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs index 89a593ab0f57..2cabce567b6e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs @@ -16,9 +16,7 @@ #![cfg(test)] -use coretime_rococo_runtime::{ - xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, -}; +use coretime_rococo_runtime::xcm_config::LocationToAccountId; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -134,13 +132,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 03df782bc266..149fa5d0b045 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Westend's Coretime parachain runtime" license = "Apache-2.0" 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -33,8 +31,8 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-broker = { workspace = true } pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -46,8 +44,8 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -74,17 +72,14 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } -[dev-dependencies] -parachains-runtimes-test-utils = { workspace = true, default-features = true } - [features] default = ["std"] std = [ @@ -177,7 +172,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 
985e64fb76f9..f0c03849750a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -127,12 +127,6 @@ impl CoretimeInterface for CoretimeAllocator { use crate::coretime::CoretimeProviderCalls::RequestCoreCount; let request_core_count_call = RelayRuntimePallets::Coretime(RequestCoreCount(count)); - // Weight for `request_core_count` from westend benchmarks: - // `ref_time` = 7889000 + (3 * 25000000) + (1 * 100000000) = 182889000 - // `proof_size` = 1636 - // Add 5% to each component and round to 2 significant figures. - let call_weight = Weight::from_parts(190_000_000, 1700); - let message = Xcm(vec![ Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, @@ -141,7 +135,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_core_count_call.encode().into(), - fallback_max_weight: Some(call_weight), }, ]); @@ -171,7 +164,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: request_revenue_info_at_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -200,7 +192,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: credit_account_call.encode().into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -225,12 +216,6 @@ impl CoretimeInterface for CoretimeAllocator { ) { use crate::coretime::CoretimeProviderCalls::AssignCore; - // Weight for `assign_core` from westend benchmarks: - // `ref_time` = 10177115 + (1 * 25000000) + (2 * 100000000) + (57600 * 13932) = 937660315 - // `proof_size` = 3612 - // Add 5% to each component and round to 2 significant figures. 
- let call_weight = Weight::from_parts(980_000_000, 3800); - // The relay chain currently only allows `assign_core` to be called with a complete mask // and only ever with increasing `begin`. The assignments must be truncated to avoid // dropping that core's assignment completely. @@ -271,7 +256,6 @@ impl CoretimeInterface for CoretimeAllocator { Instruction::Transact { origin_kind: OriginKind::Native, call: assign_core_call.encode().into(), - fallback_max_weight: Some(call_weight), }, ]); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 431bfc8a63ba..edede5aeb46b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -68,7 +68,7 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::BuildStorage; use sp_runtime::{ generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, BlockNumberProvider}, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; @@ -124,7 +124,6 @@ pub type Migrations = ( pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, pallet_broker::migration::MigrateV2ToV3, - pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -150,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("coretime-westend"), impl_name: alloc::borrow::Cow::Borrowed("coretime-westend"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -446,7 +445,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = 
weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -579,7 +577,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -589,25 +586,6 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } -pub struct BrokerMigrationV4BlockConversion; - -impl pallet_broker::migration::v4::BlockToRelayHeightConversion - for BrokerMigrationV4BlockConversion -{ - fn convert_block_number_to_relay_height(input_block_number: u32) -> u32 { - let relay_height = pallet_broker::RCBlockNumberProviderOf::< - ::Coretime, - >::current_block_number(); - let parachain_block_number = frame_system::Pallet::::block_number(); - let offset = relay_height - parachain_block_number * 2; - offset + input_block_number * 2 - } - - fn convert_block_length_to_relay_length(input_block_length: u32) -> u32 { - input_block_length * 2 - } -} - // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -827,8 +805,7 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1126,17 +1103,25 @@ impl_runtime_apis! 
{ } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - let origin = Location::new(1, [Parachain(1000)]); - let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); - Ok((origin, target)) + Err(BenchmarkError::Skip) } } type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index a0eee2d99efa..74b1c4e47029 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_274_000 picoseconds. - Weight::from_parts(2_421_000, 0) + // Minimum execution time: 1_899_000 picoseconds. + Weight::from_parts(2_051_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 26_257_000 picoseconds. - Weight::from_parts(26_802_000, 0) + // Minimum execution time: 21_965_000 picoseconds. + Weight::from_parts(22_774_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 24_692_000 picoseconds. - Weight::from_parts(25_275_000, 0) + // Minimum execution time: 20_748_000 picoseconds. + Weight::from_parts(21_464_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 13_872_000 picoseconds. - Weight::from_parts(14_509_000, 0) + // Minimum execution time: 10_269_000 picoseconds. 
+ Weight::from_parts(10_508_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -121,8 +121,6 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -134,34 +132,32 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12279` // Estimated: `14805 + n * (1 ±0)` - // Minimum execution time: 52_916_000 picoseconds. - Weight::from_parts(96_122_236, 0) + // Minimum execution time: 41_900_000 picoseconds. 
+ Weight::from_parts(80_392_728, 0) .saturating_add(Weight::from_parts(0, 14805)) - // Standard Error: 969 - .saturating_add(Weight::from_parts(5_732, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(27)) + // Standard Error: 870 + .saturating_add(Weight::from_parts(4_361, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(26)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 56_955_000 picoseconds. - Weight::from_parts(59_005_000, 0) + // Minimum execution time: 40_911_000 picoseconds. 
+ Weight::from_parts(43_102_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -173,18 +169,16 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `658` + // Measured: `450` // Estimated: `4698` - // Minimum execution time: 108_853_000 picoseconds. - Weight::from_parts(117_467_000, 0) + // Minimum execution time: 70_257_000 picoseconds. + Weight::from_parts(73_889_000, 0) .saturating_add(Weight::from_parts(0, 4698)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -193,8 +187,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 16_922_000 picoseconds. - Weight::from_parts(17_544_000, 0) + // Minimum execution time: 13_302_000 picoseconds. 
+ Weight::from_parts(13_852_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -205,8 +199,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 18_762_000 picoseconds. - Weight::from_parts(19_162_000, 0) + // Minimum execution time: 14_927_000 picoseconds. + Weight::from_parts(15_553_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -217,8 +211,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 20_297_000 picoseconds. - Weight::from_parts(20_767_000, 0) + // Minimum execution time: 16_237_000 picoseconds. + Weight::from_parts(16_995_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -235,8 +229,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `736` // Estimated: `4681` - // Minimum execution time: 31_347_000 picoseconds. - Weight::from_parts(32_259_000, 0) + // Minimum execution time: 24_621_000 picoseconds. + Weight::from_parts(25_165_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -255,8 +249,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `802` // Estimated: `5996` - // Minimum execution time: 38_310_000 picoseconds. - Weight::from_parts(39_777_000, 0) + // Minimum execution time: 29_832_000 picoseconds. 
+ Weight::from_parts(30_894_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -270,13 +264,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `671` + // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 65_960_000 picoseconds. - Weight::from_parts(66_194_985, 0) + // Minimum execution time: 55_390_000 picoseconds. + Weight::from_parts(56_124_789, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 42_455 - .saturating_add(Weight::from_parts(1_808_497, 0).saturating_mul(m.into())) + // Standard Error: 41_724 + .saturating_add(Weight::from_parts(1_551_266, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -296,11 +290,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `321` - // Estimated: `3786` - // Minimum execution time: 69_918_000 picoseconds. - Weight::from_parts(72_853_000, 0) - .saturating_add(Weight::from_parts(0, 3786)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 59_759_000 picoseconds. + Weight::from_parts(61_310_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -312,8 +306,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `3551` - // Minimum execution time: 44_775_000 picoseconds. 
- Weight::from_parts(58_978_000, 0) + // Minimum execution time: 37_007_000 picoseconds. + Weight::from_parts(51_927_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -328,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 67_098_000 picoseconds. - Weight::from_parts(93_626_000, 0) + // Minimum execution time: 86_563_000 picoseconds. + Weight::from_parts(91_274_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -344,10 +338,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `979` + // Measured: `857` // Estimated: `3593` - // Minimum execution time: 89_463_000 picoseconds. - Weight::from_parts(113_286_000, 0) + // Minimum execution time: 93_655_000 picoseconds. + Weight::from_parts(98_160_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -360,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 42_073_000 picoseconds. - Weight::from_parts(52_211_000, 0) + // Minimum execution time: 33_985_000 picoseconds. 
+ Weight::from_parts(43_618_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -377,26 +371,30 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 22_937_000 picoseconds. - Weight::from_parts(23_898_154, 0) + // Minimum execution time: 18_778_000 picoseconds. + Weight::from_parts(19_543_425, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 41 + .saturating_add(Weight::from_parts(33, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { + fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 7_650_000 picoseconds. - Weight::from_parts(8_166_809, 0) + // Minimum execution time: 5_505_000 picoseconds. 
+ Weight::from_parts(5_982_015, 0) .saturating_add(Weight::from_parts(0, 1487)) + // Standard Error: 13 + .saturating_add(Weight::from_parts(44, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -404,54 +402,40 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 53_023_000 picoseconds. - Weight::from_parts(54_564_000, 0) + // Minimum execution time: 38_128_000 picoseconds. 
+ Weight::from_parts(40_979_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::Reservations` (r:1 w:0) /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) - /// Storage: `Broker::Configuration` (r:1 w:0) - /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::PotentialRenewals` (r:20 w:40) - /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:21 w:20) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: 
Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `16480` - // Estimated: `69404 + n * (8 ±1)` - // Minimum execution time: 29_313_000 picoseconds. - Weight::from_parts(746_062_644, 0) - .saturating_add(Weight::from_parts(0, 69404)) - // Standard Error: 22_496 - .saturating_add(Weight::from_parts(1_545_204, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(44)) - .saturating_add(T::DbWeight::get().writes(57)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(n.into())) + // Measured: `12194` + // Estimated: `13506` + // Minimum execution time: 49_041_000 picoseconds. + Weight::from_parts(50_522_788, 0) + .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 72 + .saturating_add(Weight::from_parts(78, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(25)) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -461,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 7_625_000 picoseconds. - Weight::from_parts(7_910_000, 0) + // Minimum execution time: 5_903_000 picoseconds. + Weight::from_parts(6_202_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -485,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 36_572_000 picoseconds. - Weight::from_parts(37_316_000, 0) + // Minimum execution time: 31_412_000 picoseconds. 
+ Weight::from_parts(31_964_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -505,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_362_000 picoseconds. - Weight::from_parts(18_653_000, 0) + // Minimum execution time: 14_098_000 picoseconds. + Weight::from_parts(14_554_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -517,8 +501,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_193_000 picoseconds. - Weight::from_parts(2_393_000, 0) + // Minimum execution time: 1_723_000 picoseconds. + Weight::from_parts(1_822_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -528,8 +512,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_344_000 picoseconds. - Weight::from_parts(2_486_000, 0) + // Minimum execution time: 1_865_000 picoseconds. + Weight::from_parts(1_983_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -547,38 +531,20 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `408` // Estimated: `1893` - // Minimum execution time: 15_443_000 picoseconds. - Weight::from_parts(15_753_000, 0) + // Minimum execution time: 10_387_000 picoseconds. 
+ Weight::from_parts(10_819_000, 0) .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Broker::SaleInfo` (r:1 w:0) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:1) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:2) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) - fn force_reserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `11125` - // Estimated: `13506` - // Minimum execution time: 31_464_000 picoseconds. - Weight::from_parts(32_798_000, 0) - .saturating_add(Weight::from_parts(0, 13506)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) fn swap_leases() -> Weight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `1566` - // Minimum execution time: 8_637_000 picoseconds. - Weight::from_parts(8_883_000, 0) + // Minimum execution time: 5_996_000 picoseconds. 
+ Weight::from_parts(6_278_000, 0) .saturating_add(Weight::from_parts(0, 1566)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -591,44 +557,44 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `1451` - // Estimated: `6196` - // Minimum execution time: 120_585_000 picoseconds. 
- Weight::from_parts(148_755_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `914` + // Estimated: `4698` + // Minimum execution time: 51_938_000 picoseconds. + Weight::from_parts(55_025_000, 4698) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `506` - // Estimated: `1686` - // Minimum execution time: 18_235_000 picoseconds. - Weight::from_parts(19_113_000, 0) - .saturating_add(Weight::from_parts(0, 1686)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } + // Measured: `480` + // Estimated: `1516` + // Minimum execution time: 9_628_000 picoseconds. + Weight::from_parts(10_400_000, 1516) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn on_new_timeslice() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 4_863_000 picoseconds. - Weight::from_parts(5_045_000, 0) + // Minimum execution time: 2_187_000 picoseconds. 
+ Weight::from_parts(2_372_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index 7659b8a1ac7e..fa588e982f09 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `eded932c29e2`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=coretime-westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,16 +62,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 23_956_000 picoseconds. - Weight::from_parts(24_860_000, 0) + // Minimum execution time: 18_707_000 picoseconds. 
+ Weight::from_parts(19_391_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -88,20 +84,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 74_020_000 picoseconds. - Weight::from_parts(76_288_000, 0) + // Minimum execution time: 61_874_000 picoseconds. + Weight::from_parts(63_862_000, 0) .saturating_add(Weight::from_parts(0, 3571)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:1 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: 
`PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -113,17 +107,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `377` // Estimated: `3842` - // Minimum execution time: 118_691_000 picoseconds. - Weight::from_parts(128_472_000, 0) + // Minimum execution time: 98_657_000 picoseconds. 
+ Weight::from_parts(101_260_000, 0) .saturating_add(Weight::from_parts(0, 3842)) - .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -136,16 +130,13 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 11_608_000 picoseconds. - Weight::from_parts(12_117_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_455_000 picoseconds. + Weight::from_parts(8_842_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -153,8 +144,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_574_000 picoseconds. - Weight::from_parts(8_305_000, 0) + // Minimum execution time: 5_850_000 picoseconds. + Weight::from_parts(6_044_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +155,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_438_000 picoseconds. - Weight::from_parts(2_663_000, 0) + // Minimum execution time: 1_754_000 picoseconds. 
+ Weight::from_parts(1_832_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +180,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 31_482_000 picoseconds. - Weight::from_parts(33_926_000, 0) + // Minimum execution time: 24_886_000 picoseconds. + Weight::from_parts(25_403_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -213,8 +204,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 35_869_000 picoseconds. - Weight::from_parts(37_030_000, 0) + // Minimum execution time: 28_114_000 picoseconds. + Weight::from_parts(28_414_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -225,45 +216,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_385_000 picoseconds. - Weight::from_parts(2_588_000, 0) + // Minimum execution time: 1_713_000 picoseconds. + Weight::from_parts(1_810_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 21_919_000 picoseconds. 
- Weight::from_parts(22_926_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 15_910_000 picoseconds. + Weight::from_parts(16_256_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 22_588_000 picoseconds. - Weight::from_parts(23_144_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 15_801_000 picoseconds. + Weight::from_parts(16_298_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 25_527_000 picoseconds. - Weight::from_parts(26_002_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 17_976_000 picoseconds. 
+ Weight::from_parts(18_390_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -281,36 +272,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 30_751_000 picoseconds. - Weight::from_parts(31_977_000, 0) + // Minimum execution time: 24_723_000 picoseconds. + Weight::from_parts(25_531_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 16_496_000 picoseconds. - Weight::from_parts(16_800_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 10_954_000 picoseconds. + Weight::from_parts(11_199_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 22_667_000 picoseconds. 
- Weight::from_parts(23_049_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 16_561_000 picoseconds. + Weight::from_parts(16_908_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -325,11 +316,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `15982` - // Minimum execution time: 43_208_000 picoseconds. - Weight::from_parts(44_012_000, 0) - .saturating_add(Weight::from_parts(0, 15982)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `13507` + // Minimum execution time: 33_279_000 picoseconds. + Weight::from_parts(33_869_000, 0) + .saturating_add(Weight::from_parts(0, 13507)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -340,8 +331,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_726_000 picoseconds. - Weight::from_parts(4_989_000, 0) + // Minimum execution time: 3_405_000 picoseconds. 
+ Weight::from_parts(3_489_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -352,24 +343,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 28_064_000 picoseconds. - Weight::from_parts(28_676_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(25_143_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_106_000 picoseconds. - Weight::from_parts(41_949_000, 0) + // Minimum execution time: 35_229_000 picoseconds. 
+ Weight::from_parts(36_035_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index 2f7529481543..1f4b4aa5c5a8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -21,7 +21,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -84,11 +83,7 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -177,16 +172,8 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. 
} => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() @@ -261,12 +248,10 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - XcmGeneric::::alias_origin() + // XCM Executor does not currently support alias origin operations + Weight::MAX } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() - } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 2d10ac16ea26..bd70bc4f4bd9 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,28 +17,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=coretime-westend-dev -// --pallet=pallet_xcm_benchmarks::generic -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --template=cumulus/templates/xcm-bench-template.hbs -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=coretime-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -66,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 30_717_000 picoseconds. - Weight::from_parts(31_651_000, 3571) + // Minimum execution time: 29_463_000 picoseconds. + Weight::from_parts(30_178_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -75,26 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 618_000 picoseconds. - Weight::from_parts(659_000, 0) + // Minimum execution time: 568_000 picoseconds. 
+ Weight::from_parts(608_000, 0) } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn pay_fees() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 3_504_000 picoseconds. - Weight::from_parts(3_757_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(702_000, 0) + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_585_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -102,65 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_799_000 picoseconds. - Weight::from_parts(8_037_000, 3497) + // Minimum execution time: 7_400_000 picoseconds. + Weight::from_parts(7_572_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_910_000 picoseconds. - Weight::from_parts(7_086_000, 0) + // Minimum execution time: 6_951_000 picoseconds. + Weight::from_parts(7_173_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_257_000 picoseconds. - Weight::from_parts(1_384_000, 0) + // Minimum execution time: 1_245_000 picoseconds. + Weight::from_parts(1_342_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 634_000 picoseconds. 
- Weight::from_parts(687_000, 0) + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(657_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 604_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(656_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 593_000 picoseconds. - Weight::from_parts(643_000, 0) + // Minimum execution time: 570_000 picoseconds. + Weight::from_parts(608_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 630_000 picoseconds. - Weight::from_parts(694_000, 0) - } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 706_000 picoseconds. - Weight::from_parts(764_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(607_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 606_000 picoseconds. - Weight::from_parts(705_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(578_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -178,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 27_188_000 picoseconds. - Weight::from_parts(27_847_000, 3571) + // Minimum execution time: 26_179_000 picoseconds. 
+ Weight::from_parts(27_089_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -189,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 11_170_000 picoseconds. - Weight::from_parts(11_416_000, 3555) + // Minimum execution time: 10_724_000 picoseconds. + Weight::from_parts(10_896_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -198,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(653_000, 0) + // Minimum execution time: 567_000 picoseconds. + Weight::from_parts(623_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -217,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 25_196_000 picoseconds. - Weight::from_parts(25_641_000, 3539) + // Minimum execution time: 24_367_000 picoseconds. + Weight::from_parts(25_072_000, 3539) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -228,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_686_000 picoseconds. - Weight::from_parts(2_827_000, 0) + // Minimum execution time: 2_554_000 picoseconds. + Weight::from_parts(2_757_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 989_000 picoseconds. - Weight::from_parts(1_051_000, 0) + // Minimum execution time: 922_000 picoseconds. 
+ Weight::from_parts(992_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(766_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(723_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 626_000 picoseconds. - Weight::from_parts(657_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(647_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 595_000 picoseconds. - Weight::from_parts(639_000, 0) + // Minimum execution time: 591_000 picoseconds. + Weight::from_parts(620_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 755_000 picoseconds. - Weight::from_parts(820_000, 0) + // Minimum execution time: 735_000 picoseconds. + Weight::from_parts(802_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -283,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 31_409_000 picoseconds. - Weight::from_parts(32_098_000, 3571) + // Minimum execution time: 29_923_000 picoseconds. + Weight::from_parts(30_770_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -292,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_258_000 picoseconds. - Weight::from_parts(3_448_000, 0) + // Minimum execution time: 2_884_000 picoseconds. 
+ Weight::from_parts(3_088_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -311,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 27_200_000 picoseconds. - Weight::from_parts(28_299_000, 3571) + // Minimum execution time: 26_632_000 picoseconds. + Weight::from_parts(27_228_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -320,42 +300,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 659_000 picoseconds. - Weight::from_parts(699_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(655_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 595_000 picoseconds. - Weight::from_parts(647_000, 0) + // Minimum execution time: 587_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(617_000, 0) + // Minimum execution time: 572_000 picoseconds. + Weight::from_parts(631_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 595_000 picoseconds. - Weight::from_parts(633_000, 0) + // Minimum execution time: 570_000 picoseconds. + Weight::from_parts(615_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 610_000 picoseconds. - Weight::from_parts(670_000, 0) + // Minimum execution time: 624_000 picoseconds. 
+ Weight::from_parts(659_000, 0) } - pub fn alias_origin() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 630_000 picoseconds. - Weight::from_parts(700_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 8a4879a1506e..5616c585a13c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -39,8 +39,7 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, - AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, @@ -53,9 +52,7 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! { - pub const RootLocation: Location = Location::here(); pub const TokenRelayLocation: Location = Location::parent(); - pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]); pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = @@ -188,15 +185,10 @@ parameter_types! 
{ /// Locations that will not be charged fees in the executor, neither for execution nor delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( - Equals, RelayOrOtherSystemParachains, Equals, ); -/// We allow locations to alias into their own child locations, as well as -/// AssetHub to alias into anything. -pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter); - pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -238,7 +230,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Aliasers; + type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs index 976ce23d6e87..e391d71a9ab7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs @@ -16,9 +16,7 @@ #![cfg(test)] -use coretime_westend_runtime::{ - xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, -}; +use coretime_westend_runtime::xcm_config::LocationToAccountId; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -134,13 +132,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 
1c1041a4317e..09b4ef679d24 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Glutton parachain runtime." -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -20,12 +18,11 @@ frame-benchmarking = { optional = true, workspace = true } frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-glutton = { workspace = true } -pallet-message-queue = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } sp-api = { workspace = true } @@ -34,6 +31,7 @@ sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +pallet-message-queue = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -77,7 +75,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 763f8abea34a..fdf467ab64b8 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -102,7 +102,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: 
alloc::borrow::Cow::Borrowed("glutton-westend"), impl_name: alloc::borrow::Cow::Borrowed("glutton-westend"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index de2898046c0d..34458c2352fb 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Rococo's People parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -72,16 +70,13 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } -[dev-dependencies] -parachains-runtimes-test-utils = { workspace = true, default-features = true } - [features] default = ["std"] std = [ @@ -176,7 +171,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index ef3c90ace826..25356a84806d 100644 --- 
a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -137,7 +137,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("people-rococo"), impl_name: alloc::borrow::Cow::Borrowed("people-rococo"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -407,7 +407,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -521,7 +520,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -783,8 +781,7 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1055,8 +1052,18 @@ impl_runtime_apis! 
{ type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index d50afdbee475..fabce29b5fd9 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=people-rococo-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,8 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,18 +60,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 29_029_000 picoseconds. - Weight::from_parts(29_911_000, 0) - .saturating_add(Weight::from_parts(0, 3572)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 17_830_000 picoseconds. 
+ Weight::from_parts(18_411_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -88,12 +82,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 73_046_000 picoseconds. - Weight::from_parts(76_061_000, 0) - .saturating_add(Weight::from_parts(0, 3572)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 55_456_000 picoseconds. 
+ Weight::from_parts(56_808_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -116,16 +110,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(12_050_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -133,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_963_000 picoseconds. - Weight::from_parts(7_371_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -144,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_281_000 picoseconds. - Weight::from_parts(2_417_000, 0) + // Minimum execution time: 1_768_000 picoseconds. 
+ Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -169,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_422_000 picoseconds. - Weight::from_parts(31_342_000, 0) + // Minimum execution time: 24_120_000 picoseconds. + Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 35_290_000 picoseconds. - Weight::from_parts(36_161_000, 0) + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,45 +198,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_115_000 picoseconds. - Weight::from_parts(2_389_000, 0) + // Minimum execution time: 1_821_000 picoseconds. + Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 22_355_000 picoseconds. 
- Weight::from_parts(23_011_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 22_043_000 picoseconds. - Weight::from_parts(22_506_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 26_143_000 picoseconds. - Weight::from_parts(26_577_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 18_596_000 picoseconds. 
+ Weight::from_parts(18_823_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -261,36 +254,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 30_489_000 picoseconds. - Weight::from_parts(31_415_000, 0) + // Minimum execution time: 23_817_000 picoseconds. + Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 16_848_000 picoseconds. - Weight::from_parts(17_169_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 11_042_000 picoseconds. + Weight::from_parts(11_578_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 22_556_000 picoseconds. 
- Weight::from_parts(22_875_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -305,11 +298,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 42_772_000 picoseconds. - Weight::from_parts(43_606_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `13471` + // Minimum execution time: 32_141_000 picoseconds. + Weight::from_parts(32_954_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -320,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_811_000 picoseconds. - Weight::from_parts(5_060_000, 0) + // Minimum execution time: 3_410_000 picoseconds. 
+ Weight::from_parts(3_556_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -332,24 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 31_925_000 picoseconds. - Weight::from_parts(32_294_000, 0) + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_804_000 picoseconds. - Weight::from_parts(42_347_000, 0) + // Minimum execution time: 33_801_000 picoseconds. 
+ Weight::from_parts(34_655_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs index d55198f60a00..b82872a1cbf2 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs @@ -21,7 +21,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -84,11 +83,7 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -255,18 +250,7 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. 
} => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight - } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index caa916507348..30e28fac7e57 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -331,18 +331,11 @@ impl WeightInfo { // Minimum execution time: 685_000 picoseconds. Weight::from_parts(757_000, 0) } - pub fn asset_claimer() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 707_000 picoseconds. Weight::from_parts(749_000, 0) } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. 
- Weight::from_parts(776_000, 0) - } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs index 00fe7781822a..3627d9c40ec2 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs @@ -17,9 +17,7 @@ #![cfg(test)] use parachains_common::AccountId; -use people_rococo_runtime::{ - xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, -}; +use people_rococo_runtime::xcm_config::LocationToAccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -134,13 +132,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 65bc8264934f..6840b97d8c3f 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Westend's People parachain runtime" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -72,16 +70,13 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } 
+cumulus-primitives-storage-weight-reclaim = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } -[dev-dependencies] -parachains-runtimes-test-utils = { workspace = true, default-features = true } - [features] default = ["std"] std = [ @@ -176,7 +171,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index ebf8fcb33bd8..1c5183636c49 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -136,10 +136,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("people-westend"), impl_name: alloc::borrow::Cow::Borrowed("people-westend"), authoring_version: 1, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 2, + transaction_version: 1, system_version: 1, }; @@ -406,7 +406,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -520,7 +519,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -781,8 +779,7 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1046,17 +1043,25 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - let origin = Location::new(1, [Parachain(1000)]); - let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); - Ok((origin, target)) + Err(BenchmarkError::Skip) } } type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index f06669209a18..c337289243b7 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,27 
+17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `4105cf7eb2c7`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=people-westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,8 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -64,18 +60,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 29_434_000 picoseconds. - Weight::from_parts(30_114_000, 0) - .saturating_add(Weight::from_parts(0, 3572)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 17_856_000 picoseconds. 
+ Weight::from_parts(18_473_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -88,12 +82,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 73_433_000 picoseconds. - Weight::from_parts(75_377_000, 0) - .saturating_add(Weight::from_parts(0, 3572)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 56_112_000 picoseconds. 
+ Weight::from_parts(57_287_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -116,16 +110,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 11_627_000 picoseconds. - Weight::from_parts(12_034_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -133,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_075_000 picoseconds. - Weight::from_parts(7_406_000, 0) + // Minimum execution time: 6_186_000 picoseconds. + Weight::from_parts(6_420_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -144,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_308_000 picoseconds. - Weight::from_parts(2_485_000, 0) + // Minimum execution time: 1_824_000 picoseconds. 
+ Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -169,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 29_939_000 picoseconds. - Weight::from_parts(30_795_000, 0) + // Minimum execution time: 23_833_000 picoseconds. + Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 34_830_000 picoseconds. - Weight::from_parts(35_677_000, 0) + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,45 +198,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_363_000 picoseconds. - Weight::from_parts(2_517_000, 0) + // Minimum execution time: 1_921_000 picoseconds. + Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `15929` - // Minimum execution time: 22_322_000 picoseconds. 
- Weight::from_parts(22_709_000, 0) - .saturating_add(Weight::from_parts(0, 15929)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13454` + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `15933` - // Minimum execution time: 22_418_000 picoseconds. - Weight::from_parts(22_834_000, 0) - .saturating_add(Weight::from_parts(0, 15933)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13458` + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `18421` - // Minimum execution time: 26_310_000 picoseconds. - Weight::from_parts(26_623_000, 0) - .saturating_add(Weight::from_parts(0, 18421)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15946` + // Minimum execution time: 18_164_000 picoseconds. 
+ Weight::from_parts(18_580_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -261,36 +254,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 29_863_000 picoseconds. - Weight::from_parts(30_467_000, 0) + // Minimum execution time: 23_577_000 picoseconds. + Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `13474` - // Minimum execution time: 17_075_000 picoseconds. - Weight::from_parts(17_578_000, 0) - .saturating_add(Weight::from_parts(0, 13474)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `136` + // Estimated: `11026` + // Minimum execution time: 11_014_000 picoseconds. + Weight::from_parts(11_223_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `15940` - // Minimum execution time: 22_816_000 picoseconds. 
- Weight::from_parts(23_175_000, 0) - .saturating_add(Weight::from_parts(0, 15940)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13465` + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -305,11 +298,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 42_767_000 picoseconds. - Weight::from_parts(43_308_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `13471` + // Minimum execution time: 31_705_000 picoseconds. + Weight::from_parts(32_166_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -320,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_864_000 picoseconds. - Weight::from_parts(5_010_000, 0) + // Minimum execution time: 3_568_000 picoseconds. 
+ Weight::from_parts(3_669_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -332,24 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 30_237_000 picoseconds. - Weight::from_parts(30_662_000, 0) + // Minimum execution time: 24_823_000 picoseconds. + Weight::from_parts(25_344_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) - /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 41_418_000 picoseconds. - Weight::from_parts(42_011_000, 0) + // Minimum execution time: 34_516_000 picoseconds. 
+ Weight::from_parts(35_478_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index 466da1eadd55..8ca9771dca46 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -21,7 +21,6 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::{ latest::{prelude::*, AssetTransferFilter}, DoubleEncoded, @@ -84,11 +83,7 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) } - fn transact( - _origin_type: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_type: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -249,23 +244,13 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - XcmGeneric::::alias_origin() + // XCM Executor does not currently support alias origin operations + Weight::MAX } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. 
} => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight - } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 3fa51a816b69..3c539902abc8 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,28 +17,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --extrinsic=* -// --chain=people-westend-dev -// --pallet=pallet_xcm_benchmarks::generic -// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --template=cumulus/templates/xcm-bench-template.hbs -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,10 +62,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 31_309_000 picoseconds. - Weight::from_parts(31_924_000, 3572) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 29_015_000 picoseconds. + Weight::from_parts(30_359_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -75,26 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 635_000 picoseconds. - Weight::from_parts(677_000, 0) + // Minimum execution time: 572_000 picoseconds. 
+ Weight::from_parts(637_000, 0) } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn pay_fees() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 3_457_000 picoseconds. - Weight::from_parts(3_656_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 644_000 picoseconds. - Weight::from_parts(695_000, 0) + // Minimum execution time: 1_550_000 picoseconds. + Weight::from_parts(1_604_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -102,65 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_701_000 picoseconds. - Weight::from_parts(8_120_000, 3497) + // Minimum execution time: 7_354_000 picoseconds. + Weight::from_parts(7_808_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_945_000 picoseconds. - Weight::from_parts(7_187_000, 0) + // Minimum execution time: 6_716_000 picoseconds. + Weight::from_parts(7_067_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_352_000 picoseconds. - Weight::from_parts(1_428_000, 0) + // Minimum execution time: 1_280_000 picoseconds. + Weight::from_parts(1_355_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 603_000 picoseconds. 
- Weight::from_parts(648_000, 0) + // Minimum execution time: 587_000 picoseconds. + Weight::from_parts(645_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 621_000 picoseconds. - Weight::from_parts(661_000, 0) + // Minimum execution time: 629_000 picoseconds. + Weight::from_parts(662_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 591_000 picoseconds. - Weight::from_parts(655_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(639_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 666_000 picoseconds. - Weight::from_parts(736_000, 0) - } - pub fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 694_000 picoseconds. - Weight::from_parts(759_000, 0) + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(688_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 632_000 picoseconds. - Weight::from_parts(664_000, 0) + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(630_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -176,10 +156,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 26_932_000 picoseconds. 
- Weight::from_parts(27_882_000, 3572) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 25_650_000 picoseconds. + Weight::from_parts(26_440_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -189,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 11_316_000 picoseconds. - Weight::from_parts(11_608_000, 3555) + // Minimum execution time: 10_492_000 picoseconds. + Weight::from_parts(10_875_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -198,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 564_000 picoseconds. - Weight::from_parts(614_000, 0) + // Minimum execution time: 597_000 picoseconds. + Weight::from_parts(647_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -217,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_373_000 picoseconds. - Weight::from_parts(25_068_000, 3503) + // Minimum execution time: 23_732_000 picoseconds. + Weight::from_parts(24_290_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -228,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_582_000 picoseconds. - Weight::from_parts(2_714_000, 0) + // Minimum execution time: 2_446_000 picoseconds. + Weight::from_parts(2_613_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 952_000 picoseconds. 
- Weight::from_parts(1_059_000, 0) + // Minimum execution time: 960_000 picoseconds. + Weight::from_parts(1_045_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 684_000 picoseconds. - Weight::from_parts(734_000, 0) + // Minimum execution time: 703_000 picoseconds. + Weight::from_parts(739_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(650_000, 0) + // Minimum execution time: 616_000 picoseconds. + Weight::from_parts(651_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 599_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(660_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 769_000 picoseconds. - Weight::from_parts(816_000, 0) + // Minimum execution time: 794_000 picoseconds. + Weight::from_parts(831_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -281,10 +261,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 31_815_000 picoseconds. - Weight::from_parts(32_738_000, 3572) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 29_527_000 picoseconds. 
+ Weight::from_parts(30_614_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -292,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_462_000 picoseconds. - Weight::from_parts(3_563_000, 0) + // Minimum execution time: 3_189_000 picoseconds. + Weight::from_parts(3_296_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -309,10 +289,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `3572` - // Minimum execution time: 27_752_000 picoseconds. - Weight::from_parts(28_455_000, 3572) + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 25_965_000 picoseconds. + Weight::from_parts(26_468_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -320,42 +300,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 605_000 picoseconds. - Weight::from_parts(687_000, 0) + // Minimum execution time: 618_000 picoseconds. + Weight::from_parts(659_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 610_000 picoseconds. - Weight::from_parts(646_000, 0) + // Minimum execution time: 593_000 picoseconds. + Weight::from_parts(618_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 579_000 picoseconds. - Weight::from_parts(636_000, 0) + // Minimum execution time: 603_000 picoseconds. 
+ Weight::from_parts(634_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(626_000, 0) + // Minimum execution time: 568_000 picoseconds. + Weight::from_parts(629_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 616_000 picoseconds. - Weight::from_parts(679_000, 0) + // Minimum execution time: 598_000 picoseconds. + Weight::from_parts(655_000, 0) } - pub fn alias_origin() -> Weight { + pub fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 626_000 picoseconds. - Weight::from_parts(687_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 7eaa43c05b20..25256495ef91 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -36,8 +36,7 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, - AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, @@ -52,7 +51,6 @@ use xcm_executor::XcmExecutor; parameter_types! 
{ pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); - pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]); pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = @@ -197,10 +195,6 @@ pub type WaivedLocations = ( LocalPlurality, ); -/// We allow locations to alias into their own child locations, as well as -/// AssetHub to alias into anything. -pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter); - pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -242,7 +236,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Aliasers; + type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs index 5cefec44b1cd..fa9331952b4b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs @@ -17,9 +17,7 @@ #![cfg(test)] use parachains_common::AccountId; -use people_westend_runtime::{ - xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, -}; +use people_westend_runtime::xcm_config::LocationToAccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -134,13 +132,3 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } - -#[test] -fn xcm_payment_api_works() { - 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< - Runtime, - RuntimeCall, - RuntimeOrigin, - Block, - >(); -} diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index cc8f29524514..01d7fcc2b5c8 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -21,27 +19,25 @@ pallet-balances = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } -pallet-collator-selection = { workspace = true } -parachain-info = { workspace = true } -parachains-common = { workspace = true } # Polkadot -pallet-xcm = { workspace = true } -polkadot-parachain-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } -xcm-runtime-apis = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } @@ -66,13 +62,11 @@ std = [ "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", - "parachains-common/std", 
"polkadot-parachain-primitives/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "xcm-executor/std", - "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 5c33809ba67b..05ecf6ca8e81 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -445,11 +445,7 @@ impl< // prepare xcm as governance will do let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Superuser, - call: call.into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Superuser, call: call.into() }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -464,26 +460,18 @@ impl< ) } - pub fn execute_as_origin( - (origin, origin_kind): (Location, OriginKind), + pub fn execute_as_origin_xcm( + origin: Location, call: Call, - maybe_buy_execution_fee: Option, + buy_execution_fee: Asset, ) -> Outcome { - let mut instructions = if let Some(buy_execution_fee) = maybe_buy_execution_fee { - vec![ - WithdrawAsset(buy_execution_fee.clone().into()), - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - ] - } else { - vec![UnpaidExecution { check_origin: None, weight_limit: Unlimited }] - }; - // prepare `Transact` xcm - instructions.extend(vec![ - Transact { origin_kind, call: call.encode().into(), fallback_max_weight: None }, + let xcm = Xcm(vec![ + WithdrawAsset(buy_execution_fee.clone().into()), + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + Transact { origin_kind: OriginKind::Xcm, call: call.encode().into() }, ExpectTransactStatus(MaybeErrorCode::Success), ]); - let xcm = Xcm(instructions); // execute xcm as parent origin let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs 
b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index 6bdf3ef09d1b..a66163154cf6 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -18,15 +18,7 @@ use crate::{AccountIdOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf}; use codec::Encode; -use frame_support::{ - assert_ok, - traits::{Get, OriginTrait}, -}; -use parachains_common::AccountId; -use sp_runtime::traits::{Block as BlockT, StaticLookup}; -use xcm_runtime_apis::fees::{ - runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, -}; +use frame_support::{assert_ok, traits::Get}; type RuntimeHelper = crate::RuntimeHelper; @@ -136,60 +128,3 @@ pub fn set_storage_keys_by_governance_works( assert_storage(); }); } - -pub fn xcm_payment_api_with_native_token_works() -where - Runtime: XcmPaymentApiV1 - + frame_system::Config - + pallet_balances::Config - + pallet_session::Config - + pallet_xcm::Config - + parachain_info::Config - + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config - + pallet_timestamp::Config, - ValidatorIdOf: From>, - RuntimeOrigin: OriginTrait::AccountId>, - <::Lookup as StaticLookup>::Source: - From<::AccountId>, - Block: BlockT, -{ - use xcm::prelude::*; - ExtBuilder::::default().build().execute_with(|| { - let transfer_amount = 100u128; - let xcm_to_weigh = Xcm::::builder_unsafe() - .withdraw_asset((Here, transfer_amount)) - .buy_execution((Here, transfer_amount), Unlimited) - .deposit_asset(AllCounted(1), [1u8; 32]) - .build(); - let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); - - // We first try calling it with a lower XCM version. 
- let lower_version_xcm_to_weigh = - versioned_xcm_to_weigh.clone().into_version(XCM_VERSION - 1).unwrap(); - let xcm_weight = Runtime::query_xcm_weight(lower_version_xcm_to_weigh); - assert!(xcm_weight.is_ok()); - let native_token: Location = Parent.into(); - let native_token_versioned = VersionedAssetId::from(AssetId(native_token)); - let lower_version_native_token = - native_token_versioned.clone().into_version(XCM_VERSION - 1).unwrap(); - let execution_fees = - Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), lower_version_native_token); - assert!(execution_fees.is_ok()); - - // Now we call it with the latest version. - let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); - assert!(xcm_weight.is_ok()); - let execution_fees = - Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); - assert!(execution_fees.is_ok()); - - // If we call it with anything other than the native token it will error. - let non_existent_token: Location = Here.into(); - let non_existent_token_versioned = VersionedAssetId::from(AssetId(non_existent_token)); - let execution_fees = - Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), non_existent_token_versioned); - assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); - }); -} diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 5b17f4f57388..3a6b9d42f211 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -32,9 +32,6 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-asset-tx-payment = { workspace = true } -pallet-assets = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { 
workspace = true } pallet-balances = { workspace = true } @@ -43,6 +40,9 @@ pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -57,9 +57,9 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot +polkadot-primitives = { workspace = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } -polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -67,8 +67,8 @@ xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } @@ -76,9 +76,9 @@ cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } -pallet-message-queue = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } +assets-common = { workspace = true } snowbridge-router-primitives = { workspace = true } primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } @@ -175,7 +175,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - 
"xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 51dc95bf2c71..b51670c792d6 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -1132,8 +1132,18 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 10481d5d2ebc..375c3d509f48 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -34,7 +34,7 @@ use core::marker::PhantomData; use frame_support::{ parameter_types, traits::{ - tokens::imbalance::ResolveAssetTo, ConstU32, Contains, ContainsPair, Equals, Everything, + tokens::imbalance::ResolveAssetTo, ConstU32, Contains, ContainsPair, Everything, EverythingBut, Get, Nothing, PalletInfoAccess, }, weights::Weight, @@ 
-210,7 +210,6 @@ pub type XcmOriginToTransactDispatchOrigin = ( ); parameter_types! { - pub const RootLocation: Location = Location::here(); // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); pub const MaxInstructions: u32 = 100; @@ -337,7 +336,6 @@ pub type TrustedReserves = ( pub type TrustedTeleporters = (AssetFromChain,); -pub type WaivedLocations = Equals; /// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -401,7 +399,7 @@ impl xcm_executor::Config for XcmConfig { type AssetLocker = (); type AssetExchanger = PoolAssetsExchanger; type FeeManager = XcmFeeManagerFromComponents< - WaivedLocations, + (), SendXcmFeeToAccount, >; type MessageExporter = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index e8761445f161..b0581c8d43ff 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -43,14 +41,15 @@ sp-version = { workspace = true } # Polkadot pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } -polkadot-runtime-common = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +polkadot-runtime-common = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } -cumulus-pallet-parachain-system = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = 
true, features = ["experimental-ump-signals"] } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-ping = { workspace = true } @@ -58,10 +57,9 @@ cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } -pallet-message-queue = { workspace = true } -parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +parachain-info = { workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -136,7 +134,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] # A feature that should be enabled when the runtime should be built for on-chain diff --git a/cumulus/polkadot-omni-node/Cargo.toml b/cumulus/polkadot-omni-node/Cargo.toml index 8b46bc882868..a736e1ef80c5 100644 --- a/cumulus/polkadot-omni-node/Cargo.toml +++ b/cumulus/polkadot-omni-node/Cargo.toml @@ -6,8 +6,6 @@ edition.workspace = true build = "build.rs" description = "Generic binary that can run a parachain node with u32 block number and Aura consensus" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/README.md b/cumulus/polkadot-omni-node/README.md index 015019961c9f..d87b3b63c407 100644 --- a/cumulus/polkadot-omni-node/README.md +++ b/cumulus/polkadot-omni-node/README.md @@ -49,10 +49,10 @@ chain-spec-builder create --relay-chain --para-id -r +polkadot-omni-node --chain ``` ## Useful links diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index 018fc88a2aea..a690229f1695 100644 --- 
a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Helper library that can be used to build a parachain node" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,61 +17,55 @@ async-trait = { workspace = true } clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } color-print = { workspace = true } -docify = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +docify = { workspace = true } # Local jsonrpsee = { features = ["server"], workspace = true } parachains-common = { workspace = true, default-features = true } -scale-info = { workspace = true } -subxt-metadata = { workspace = true, default-features = true } # Substrate frame-benchmarking = { optional = true, workspace = true, default-features = true } frame-benchmarking-cli = { workspace = true, default-features = true } -frame-support = { optional = true, workspace = true, default-features = true } -frame-system-rpc-runtime-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } frame-try-runtime = { optional = true, workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -pallet-transaction-payment-rpc = { workspace = true, default-features = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } 
-sc-chain-spec = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +frame-support = { optional = true, workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true, default-features = true } -sc-consensus = { workspace = true, default-features = true } -sc-consensus-manual-seal = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true } +sp-transaction-pool = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true, default-features 
= true } -sp-storage = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } # Polkadot @@ -84,9 +76,9 @@ polkadot-primitives = { workspace = true, default-features = true } cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } -cumulus-client-consensus-relay-chain = { workspace = true, 
default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } @@ -96,15 +88,18 @@ futures-timer = "3.0.3" [dev-dependencies] assert_cmd = { workspace = true } -cumulus-test-runtime = { workspace = true } nix = { features = ["signal"], workspace = true } tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = { workspace = true } [features] default = [] -rococo-native = ["polkadot-cli/rococo-native"] -westend-native = ["polkadot-cli/westend-native"] +rococo-native = [ + "polkadot-cli/rococo-native", +] +westend-native = [ + "polkadot-cli/westend-native", +] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", diff --git a/cumulus/polkadot-omni-node/lib/src/cli.rs b/cumulus/polkadot-omni-node/lib/src/cli.rs index 9c4e2561592d..dc59c185d909 100644 --- a/cumulus/polkadot-omni-node/lib/src/cli.rs +++ b/cumulus/polkadot-omni-node/lib/src/cli.rs @@ -126,14 +126,9 @@ pub struct Cli { /// Start a dev node that produces a block each `dev_block_time` ms. /// - /// This is a dev option. It enables a manual sealing, meaning blocks are produced manually - /// rather than being part of an actual network consensus process. Using the option won't - /// result in starting or connecting to a parachain network. The resulting node will work on - /// its own, running the wasm blob and artificially producing a block each `dev_block_time` ms, - /// as if it was part of a parachain. - /// - /// The `--dev` flag sets the `dev_block_time` to a default value of 3000ms unless explicitly - /// provided. + /// This is a dev option, and it won't result in starting or connecting to a parachain network. 
+ /// The resulting node will work on its own, running the wasm blob and artificially producing + /// a block each `dev_block_time` ms, as if it was part of a parachain. #[arg(long)] pub dev_block_time: Option, diff --git a/cumulus/polkadot-omni-node/lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs index fe7f7cac0971..cf283819966f 100644 --- a/cumulus/polkadot-omni-node/lib/src/command.rs +++ b/cumulus/polkadot-omni-node/lib/src/command.rs @@ -34,13 +34,11 @@ use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunc use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; -use sc_cli::{CliConfiguration, Result, SubstrateCli}; +use sc_cli::{Result, SubstrateCli}; use sp_runtime::traits::AccountIdConversion; #[cfg(feature = "runtime-benchmarks")] use sp_runtime::traits::HashingFor; -const DEFAULT_DEV_BLOCK_TIME_MS: u64 = 3000; - /// Structure that can be used in order to provide customizers for different functionalities of the /// node binary that is being built using this library. pub struct RunConfig { @@ -232,19 +230,10 @@ pub fn run(cmd_config: RunConfig) -> Result<() .ok_or("Could not find parachain extension in chain-spec.")?, ); - if cli.run.base.is_dev()? { - // Set default dev block time to 3000ms if not set. - // TODO: take block time from AURA config if set. 
- let dev_block_time = cli.dev_block_time.unwrap_or(DEFAULT_DEV_BLOCK_TIME_MS); - return node_spec - .start_manual_seal_node(config, para_id, dev_block_time) - .map_err(Into::into); - } - if let Some(dev_block_time) = cli.dev_block_time { return node_spec .start_manual_seal_node(config, para_id, dev_block_time) - .map_err(Into::into); + .map_err(Into::into) } // If Statemint (Statemine, Westmint, Rockmine) DB exists and we're using the diff --git a/cumulus/polkadot-omni-node/lib/src/common/runtime.rs b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs index fcc1d7f0643e..509d13b9d7a2 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/runtime.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs @@ -16,19 +16,7 @@ //! Runtime parameters. -use codec::Decode; -use cumulus_client_service::ParachainHostFunctions; use sc_chain_spec::ChainSpec; -use sc_executor::WasmExecutor; -use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; -use scale_info::{form::PortableForm, TypeDef, TypeDefPrimitive}; -use std::fmt::Display; -use subxt_metadata::{Metadata, StorageEntryType}; - -/// Expected parachain system pallet runtime type name. -pub const DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME: &str = "ParachainSystem"; -/// Expected frame system pallet runtime type name. -pub const DEFAULT_FRAME_SYSTEM_PALLET_NAME: &str = "System"; /// The Aura ID used by the Aura consensus #[derive(PartialEq)] @@ -47,7 +35,7 @@ pub enum Consensus { } /// The choice of block number for the parachain omni-node. 
-#[derive(PartialEq, Debug)] +#[derive(PartialEq)] pub enum BlockNumber { /// u32 U32, @@ -55,34 +43,6 @@ pub enum BlockNumber { U64, } -impl Display for BlockNumber { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockNumber::U32 => write!(f, "u32"), - BlockNumber::U64 => write!(f, "u64"), - } - } -} - -impl Into for BlockNumber { - fn into(self) -> TypeDefPrimitive { - match self { - BlockNumber::U32 => TypeDefPrimitive::U32, - BlockNumber::U64 => TypeDefPrimitive::U64, - } - } -} - -impl BlockNumber { - fn from_type_def(type_def: &TypeDef) -> Option { - match type_def { - TypeDef::Primitive(TypeDefPrimitive::U32) => Some(BlockNumber::U32), - TypeDef::Primitive(TypeDefPrimitive::U64) => Some(BlockNumber::U64), - _ => None, - } - } -} - /// Helper enum listing the supported Runtime types #[derive(PartialEq)] pub enum Runtime { @@ -102,112 +62,7 @@ pub trait RuntimeResolver { pub struct DefaultRuntimeResolver; impl RuntimeResolver for DefaultRuntimeResolver { - fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result { - let Ok(metadata_inspector) = MetadataInspector::new(chain_spec) else { - log::info!("Unable to check metadata. Skipping metadata checks. Metadata checks are supported for metadata versions v14 and higher."); - return Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))) - }; - - let block_number = match metadata_inspector.block_number() { - Some(inner) => inner, - None => { - log::warn!( - r#"⚠️ There isn't a runtime type named `System`, corresponding to the `frame-system` - pallet (https://docs.rs/frame-system/latest/frame_system/). Please check Omni Node docs for runtime conventions: - https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions. 
- Note: We'll assume a block number size of `u32`."# - ); - BlockNumber::U32 - }, - }; - - if !metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME) { - log::warn!( - r#"⚠️ The parachain system pallet (https://docs.rs/crate/cumulus-pallet-parachain-system/latest) is - missing from the runtime’s metadata. Please check Omni Node docs for runtime conventions: - https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions."# - ); - } - - Ok(Runtime::Omni(block_number, Consensus::Aura(AuraConsensusId::Sr25519))) - } -} - -struct MetadataInspector(Metadata); - -impl MetadataInspector { - fn new(chain_spec: &dyn ChainSpec) -> Result { - MetadataInspector::fetch_metadata(chain_spec).map(MetadataInspector) - } - - fn pallet_exists(&self, name: &str) -> bool { - self.0.pallet_by_name(name).is_some() - } - - fn block_number(&self) -> Option { - let pallet_metadata = self.0.pallet_by_name(DEFAULT_FRAME_SYSTEM_PALLET_NAME); - pallet_metadata - .and_then(|inner| inner.storage()) - .and_then(|inner| inner.entry_by_name("Number")) - .and_then(|number_ty| match number_ty.entry_type() { - StorageEntryType::Plain(ty_id) => Some(ty_id), - _ => None, - }) - .and_then(|ty_id| self.0.types().resolve(*ty_id)) - .and_then(|portable_type| BlockNumber::from_type_def(&portable_type.type_def)) - } - - fn fetch_metadata(chain_spec: &dyn ChainSpec) -> Result { - let mut storage = chain_spec.build_storage()?; - let code_bytes = storage - .top - .remove(sp_storage::well_known_keys::CODE) - .ok_or("chain spec genesis does not contain code")?; - let opaque_metadata = fetch_latest_metadata_from_code_blob( - &WasmExecutor::::builder() - .with_allow_missing_host_functions(true) - .build(), - sp_runtime::Cow::Borrowed(code_bytes.as_slice()), - ) - .map_err(|err| err.to_string())?; - - Metadata::decode(&mut (*opaque_metadata).as_slice()).map_err(Into::into) - } -} - -#[cfg(test)] -mod tests { - use crate::runtime::{ - 
BlockNumber, MetadataInspector, DEFAULT_FRAME_SYSTEM_PALLET_NAME, - DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME, - }; - use codec::Decode; - use cumulus_client_service::ParachainHostFunctions; - use sc_executor::WasmExecutor; - use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; - - fn cumulus_test_runtime_metadata() -> subxt_metadata::Metadata { - let opaque_metadata = fetch_latest_metadata_from_code_blob( - &WasmExecutor::::builder() - .with_allow_missing_host_functions(true) - .build(), - sp_runtime::Cow::Borrowed(cumulus_test_runtime::WASM_BINARY.unwrap()), - ) - .unwrap(); - - subxt_metadata::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap() - } - - #[test] - fn test_pallet_exists() { - let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); - assert!(metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME)); - assert!(metadata_inspector.pallet_exists(DEFAULT_FRAME_SYSTEM_PALLET_NAME)); - } - - #[test] - fn test_runtime_block_number() { - let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); - assert_eq!(metadata_inspector.block_number().unwrap(), BlockNumber::U32); + fn runtime(&self, _chain_spec: &dyn ChainSpec) -> sc_cli::Result { + Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))) } } diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs index 868368f3ca1a..8397cb778dcf 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -44,28 +44,23 @@ use sc_transaction_pool::TransactionPoolHandle; use sp_keystore::KeystorePtr; use std::{future::Future, pin::Pin, sync::Arc, time::Duration}; -pub(crate) trait BuildImportQueue< - Block: BlockT, - RuntimeApi, - BlockImport: sc_consensus::BlockImport, -> -{ +pub(crate) trait BuildImportQueue { fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: 
ParachainBlockImport, config: &Configuration, telemetry_handle: Option, task_manager: &TaskManager, ) -> sc_service::error::Result>; } -pub(crate) trait StartConsensus +pub(crate) trait StartConsensus where RuntimeApi: ConstructNodeRuntimeApi>, { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -79,7 +74,6 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, - block_import_extra_return_value: BIAuxiliaryData, ) -> Result<(), sc_service::Error>; } @@ -98,31 +92,6 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { } } -pub(crate) trait InitBlockImport { - type BlockImport: sc_consensus::BlockImport + Clone + Send + Sync; - type BlockImportAuxiliaryData; - - fn init_block_import( - client: Arc>, - ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)>; -} - -pub(crate) struct ClientBlockImport; - -impl InitBlockImport for ClientBlockImport -where - RuntimeApi: Send + ConstructNodeRuntimeApi>, -{ - type BlockImport = Arc>; - type BlockImportAuxiliaryData = (); - - fn init_block_import( - client: Arc>, - ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { - Ok((client.clone(), ())) - } -} - pub(crate) trait BaseNodeSpec { type Block: NodeBlock; @@ -131,13 +100,7 @@ pub(crate) trait BaseNodeSpec { ParachainClient, >; - type BuildImportQueue: BuildImportQueue< - Self::Block, - Self::RuntimeApi, - >::BlockImport, - >; - - type InitBlockImport: self::InitBlockImport; + type BuildImportQueue: BuildImportQueue; /// Starts a `ServiceBuilder` for a full service. /// @@ -145,14 +108,7 @@ pub(crate) trait BaseNodeSpec { /// be able to perform chain operations. 
fn new_partial( config: &Configuration, - ) -> sc_service::error::Result< - ParachainService< - Self::Block, - Self::RuntimeApi, - >::BlockImport, - >::BlockImportAuxiliaryData - > - >{ + ) -> sc_service::error::Result> { let telemetry = config .telemetry_endpoints .clone() @@ -204,10 +160,7 @@ pub(crate) trait BaseNodeSpec { .build(), ); - let (block_import, block_import_auxiliary_data) = - Self::InitBlockImport::init_block_import(client.clone())?; - - let block_import = ParachainBlockImport::new(block_import, backend.clone()); + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); let import_queue = Self::BuildImportQueue::build_import_queue( client.clone(), @@ -225,7 +178,7 @@ pub(crate) trait BaseNodeSpec { task_manager, transaction_pool, select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle, block_import_auxiliary_data), + other: (block_import, telemetry, telemetry_worker_handle), }) } } @@ -237,12 +190,7 @@ pub(crate) trait NodeSpec: BaseNodeSpec { TransactionPoolHandle>, >; - type StartConsensus: StartConsensus< - Self::Block, - Self::RuntimeApi, - >::BlockImport, - >::BlockImportAuxiliaryData, - >; + type StartConsensus: StartConsensus; const SYBIL_RESISTANCE: CollatorSybilResistance; @@ -260,153 +208,153 @@ pub(crate) trait NodeSpec: BaseNodeSpec { where Net: NetworkBackend, { - let fut = async move { - let parachain_config = prepare_node_config(parachain_config); - - let params = Self::new_partial(¶chain_config)?; - let (block_import, mut telemetry, telemetry_worker_handle, block_import_auxiliary_data) = - params.other; - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| 
sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::<_, _, Net>::new( - ¶chain_config.network, - prometheus_registry.clone(), - ); - - let (network, system_rpc_tx, tx_handler_controller, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, + Box::pin( + async move { + let parachain_config = prepare_node_config(parachain_config); + + let params = Self::new_partial(¶chain_config)?; + let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + + let client = params.client.clone(); + let backend = params.backend.clone(); + + let mut task_manager = params.task_manager; + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle, + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let import_queue_service = params.import_queue.service(); + let net_config = FullNetworkConfiguration::<_, _, Net>::new( + ¶chain_config.network, + prometheus_registry.clone(), + ); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + build_network(BuildNetworkParams { + parachain_config: ¶chain_config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + para_id, + spawn_handle: task_manager.spawn_handle(), + relay_chain_interface: relay_chain_interface.clone(), + import_queue: 
params.import_queue, + sybil_resistance_level: Self::SYBIL_RESISTANCE, + }) + .await?; + + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |_| { + Self::BuildRpcExtensions::build_rpc_extensions( + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, client: client.clone(), transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: params.keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + sync_service: sync_service.clone(), + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + if validator { + warn_if_slow_hardware(&hwbench); + } + + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } + + let announce_block = { + let sync_service = sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let relay_chain_slot_duration = Duration::from_secs(6); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: announce_block.clone(), para_id, - spawn_handle: task_manager.spawn_handle(), relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level: Self::SYBIL_RESISTANCE, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let 
backend_for_rpc = backend.clone(); - - Box::new(move |_| { - Self::BuildRpcExtensions::build_rpc_extensions( - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - ) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service, + })?; + if validator { - warn_if_slow_hardware(&hwbench); + Self::StartConsensus::start_consensus( + client.clone(), + block_import, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + relay_chain_interface.clone(), + transaction_pool, + params.keystore_container.keystore(), + relay_chain_slot_duration, + para_id, + collator_key.expect("Command line arguments do not allow this. 
qed"), + overseer_handle, + announce_block, + backend.clone(), + node_extra_args, + )?; } - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } + start_network.start_network(); - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service, - })?; - - if validator { - Self::StartConsensus::start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. 
qed"), - overseer_handle, - announce_block, - backend.clone(), - node_extra_args, - block_import_auxiliary_data, - )?; + Ok(task_manager) } - - Ok(task_manager) - }; - - Box::pin(Instrument::instrument( - fut, - sc_tracing::tracing::info_span!( + .instrument(sc_tracing::tracing::info_span!( sc_tracing::logging::PREFIX_LOG_SPAN, - name = "Parachain" - ), - )) + name = "Parachain", + )), + ) } } diff --git a/cumulus/polkadot-omni-node/lib/src/common/types.rs b/cumulus/polkadot-omni-node/lib/src/common/types.rs index 978368be2584..4bc58dc9db7e 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/types.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/types.rs @@ -22,6 +22,7 @@ use sc_service::{PartialComponents, TFullBackend, TFullClient}; use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; use sc_transaction_pool::TransactionPoolHandle; use sp_runtime::{generic, traits::BlakeTwo256}; +use std::sync::Arc; pub use parachains_common::{AccountId, Balance, Hash, Nonce}; @@ -41,20 +42,15 @@ pub type ParachainClient = pub type ParachainBackend = TFullBackend; -pub type ParachainBlockImport = - TParachainBlockImport>; +pub type ParachainBlockImport = + TParachainBlockImport>, ParachainBackend>; /// Assembly of PartialComponents (enough to run chain ops subcommands) -pub type ParachainService = PartialComponents< +pub type ParachainService = PartialComponents< ParachainClient, ParachainBackend, (), DefaultImportQueue, TransactionPoolHandle>, - ( - ParachainBlockImport, - Option, - Option, - BIExtraReturnValue, - ), + (ParachainBlockImport, Option, Option), >; diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 816f76117a26..ec5d0a439ec4 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -18,10 +18,7 @@ use crate::{ common::{ aura::{AuraIdT, AuraRuntimeApi}, rpc::BuildParachainRpcExtensions, - spec::{ - BaseNodeSpec, BuildImportQueue, 
ClientBlockImport, InitBlockImport, NodeSpec, - StartConsensus, - }, + spec::{BaseNodeSpec, BuildImportQueue, NodeSpec, StartConsensus}, types::{ AccountId, Balance, Hash, Nonce, ParachainBackend, ParachainBlockImport, ParachainClient, @@ -33,14 +30,11 @@ use crate::{ use cumulus_client_collator::service::{ CollatorService, ServiceInterface as CollatorServiceInterface, }; +use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; #[docify::export(slot_based_colator_import)] use cumulus_client_consensus_aura::collators::slot_based::{ self as slot_based, Params as SlotBasedParams, }; -use cumulus_client_consensus_aura::collators::{ - lookahead::{self as aura, Params as AuraParams}, - slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle}, -}; use cumulus_client_consensus_proposer::{Proposer, ProposerInterface}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] @@ -60,7 +54,6 @@ use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; -use sp_core::traits::SpawnNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -97,23 +90,20 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
-pub(crate) struct BuildRelayToAuraImportQueue( - PhantomData<(Block, RuntimeApi, AuraId, BlockImport)>, +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(Block, RuntimeApi, AuraId)>, ); -impl - BuildImportQueue - for BuildRelayToAuraImportQueue +impl BuildImportQueue + for BuildRelayToAuraImportQueue where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, - BlockImport: - sc_consensus::BlockImport + Send + Sync + 'static, { fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, config: &Configuration, telemetry_handle: Option, task_manager: &TaskManager, @@ -168,20 +158,20 @@ where /// Uses the lookahead collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. -pub(crate) struct AuraNode( - pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus, InitBlockImport)>, +pub(crate) struct AuraNode( + pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus)>, ); -impl Default - for AuraNode +impl Default + for AuraNode { fn default() -> Self { Self(Default::default()) } } -impl BaseNodeSpec - for AuraNode +impl BaseNodeSpec + for AuraNode where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -189,19 +179,14 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, - InitBlockImport: self::InitBlockImport + Send, - InitBlockImport::BlockImport: - sc_consensus::BlockImport + 'static, { type Block = Block; type RuntimeApi = RuntimeApi; - type BuildImportQueue = - BuildRelayToAuraImportQueue; - type InitBlockImport = InitBlockImport; + type BuildImportQueue = BuildRelayToAuraImportQueue; } -impl NodeSpec - for AuraNode +impl NodeSpec + for AuraNode where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -209,15 +194,7 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + 
substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, - StartConsensus: self::StartConsensus< - Block, - RuntimeApi, - InitBlockImport::BlockImport, - InitBlockImport::BlockImportAuxiliaryData, - > + 'static, - InitBlockImport: self::InitBlockImport + Send, - InitBlockImport::BlockImport: - sc_consensus::BlockImport + 'static, + StartConsensus: self::StartConsensus + 'static, { type BuildRpcExtensions = BuildParachainRpcExtensions; type StartConsensus = StartConsensus; @@ -241,7 +218,6 @@ where RuntimeApi, AuraId, StartSlotBasedAuraConsensus, - StartSlotBasedAuraConsensus, >::default()) } else { Box::new(AuraNode::< @@ -249,7 +225,6 @@ where RuntimeApi, AuraId, StartLookaheadAuraConsensus, - ClientBlockImport, >::default()) } } @@ -267,17 +242,9 @@ where AuraId: AuraIdT + Sync, { #[docify::export_content] - fn launch_slot_based_collator( + fn launch_slot_based_collator( params: SlotBasedParams< - Block, - ParachainBlockImport< - Block, - SlotBasedBlockImport< - Block, - Arc>, - ParachainClient, - >, - >, + ParachainBlockImport, CIDP, ParachainClient, ParachainBackend, @@ -285,31 +252,33 @@ where CHP, Proposer, CS, - Spawner, >, + task_manager: &TaskManager, ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, - Spawner: SpawnNamed, { - slot_based::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + let (collation_future, block_builder_future) = + slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); + + task_manager.spawn_essential_handle().spawn( + "collation-task", + Some("parachain-block-authoring"), + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + Some("parachain-block-authoring"), + block_builder_future, + ); } } -impl, RuntimeApi, AuraId> - StartConsensus< - Block, 
- RuntimeApi, - SlotBasedBlockImport< - Block, - Arc>, - ParachainClient, - >, - SlotBasedBlockImportHandle, - > for StartSlotBasedAuraConsensus +impl, RuntimeApi, AuraId> StartConsensus + for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, @@ -317,14 +286,7 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport< - Block, - SlotBasedBlockImport< - Block, - Arc>, - ParachainClient, - >, - >, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -338,7 +300,6 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, _node_extra_args: NodeExtraArgs, - block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), @@ -374,39 +335,16 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), - block_import_handle, - spawner: task_manager.spawn_handle(), }; // We have a separate function only to be able to use `docify::export` on this piece of // code. - Self::launch_slot_based_collator(params); + Self::launch_slot_based_collator(params, task_manager); Ok(()) } } -impl, RuntimeApi, AuraId> InitBlockImport - for StartSlotBasedAuraConsensus -where - RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, - AuraId: AuraIdT + Sync, -{ - type BlockImport = SlotBasedBlockImport< - Block, - Arc>, - ParachainClient, - >; - type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; - - fn init_block_import( - client: Arc>, - ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { - Ok(SlotBasedBlockImport::new(client.clone(), client)) - } -} - /// Wait for the Aura runtime API to appear on chain. /// This is useful for chains that started out without Aura. 
Components that /// are depending on Aura functionality will wait until Aura appears in the runtime. @@ -435,8 +373,7 @@ pub(crate) struct StartLookaheadAuraConsensus( PhantomData<(Block, RuntimeApi, AuraId)>, ); -impl, RuntimeApi, AuraId> - StartConsensus>, ()> +impl, RuntimeApi, AuraId> StartConsensus for StartLookaheadAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, @@ -445,7 +382,7 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport>>, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -459,7 +396,6 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, - _: (), ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs index f33865ad45cd..b7fc3489da25 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs @@ -16,37 +16,28 @@ use crate::common::{ rpc::BuildRpcExtensions as BuildRpcExtensionsT, - spec::{BaseNodeSpec, BuildImportQueue, ClientBlockImport, NodeSpec as NodeSpecT}, + spec::{BaseNodeSpec, BuildImportQueue, NodeSpec as NodeSpecT}, types::{Hash, ParachainBlockImport, ParachainClient}, }; use codec::Encode; use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; -use cumulus_primitives_core::{CollectCollationInfo, ParaId}; -use polkadot_primitives::UpgradeGoAhead; +use cumulus_primitives_core::ParaId; use sc_consensus::{DefaultImportQueue, LongestChain}; use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; use sc_network::NetworkBackend; use sc_service::{Configuration, PartialComponents, TaskManager}; use sc_telemetry::TelemetryHandle; -use sp_api::ProvideRuntimeApi; use 
sp_runtime::traits::Header; use std::{marker::PhantomData, sync::Arc}; pub struct ManualSealNode(PhantomData); -impl - BuildImportQueue< - NodeSpec::Block, - NodeSpec::RuntimeApi, - Arc>, - > for ManualSealNode +impl BuildImportQueue + for ManualSealNode { fn build_import_queue( client: Arc>, - _block_import: ParachainBlockImport< - NodeSpec::Block, - Arc>, - >, + _block_import: ParachainBlockImport, config: &Configuration, _telemetry_handle: Option, task_manager: &TaskManager, @@ -63,7 +54,6 @@ impl BaseNodeSpec for ManualSealNode { type Block = NodeSpec::Block; type RuntimeApi = NodeSpec::RuntimeApi; type BuildImportQueue = Self; - type InitBlockImport = ClientBlockImport; } impl ManualSealNode { @@ -88,7 +78,7 @@ impl ManualSealNode { keystore_container, select_chain: _, transaction_pool, - other: (_, mut telemetry, _, _), + other: (_, mut telemetry, _), } = Self::new_partial(&config)?; let select_chain = LongestChain::new(backend.clone()); @@ -103,7 +93,7 @@ impl ManualSealNode { config.prometheus_config.as_ref().map(|cfg| &cfg.registry), ); - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -157,18 +147,6 @@ impl ManualSealNode { .header(block) .expect("Header lookup should succeed") .expect("Header passed in as parent should be present in backend."); - - let should_send_go_ahead = match client_for_cidp - .runtime_api() - .collect_collation_info(block, ¤t_para_head) - { - Ok(info) => info.new_validation_code.is_some(), - Err(e) => { - log::error!("Failed to collect collation info: {:?}", e); - false - }, - }; - let current_para_block_head = Some(polkadot_primitives::HeadData(current_para_head.encode())); let client_for_xcm = client_for_cidp.clone(); @@ -191,12 +169,6 @@ impl ManualSealNode { raw_downward_messages: vec![], raw_horizontal_messages: vec![], 
additional_key_values: None, - upgrade_go_ahead: should_send_go_ahead.then(|| { - log::info!( - "Detected pending validation code, sending go-ahead signal." - ); - UpgradeGoAhead::GoAhead - }), }; Ok(( // This is intentional, as the runtime that we expect to run against this @@ -247,6 +219,7 @@ impl ManualSealNode { telemetry: telemetry.as_mut(), })?; + start_network.start_network(); Ok(task_manager) } } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 9130f60ceb38..5520126d0742 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -6,8 +6,6 @@ edition.workspace = true build = "build.rs" description = "Runs a polkadot parachain node" license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -24,29 +22,29 @@ serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } # Local +polkadot-omni-node-lib = { features = ["rococo-native", "westend-native"], workspace = true } +rococo-parachain-runtime = { workspace = true } +glutton-westend-runtime = { workspace = true } asset-hub-rococo-runtime = { workspace = true, default-features = true } asset-hub-westend-runtime = { workspace = true } -bridge-hub-rococo-runtime = { workspace = true, default-features = true } -bridge-hub-westend-runtime = { workspace = true, default-features = true } collectives-westend-runtime = { workspace = true } contracts-rococo-runtime = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } coretime-rococo-runtime = { workspace = true } coretime-westend-runtime = { workspace = true } -glutton-westend-runtime = { workspace = true } -parachains-common = { workspace = true, default-features = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } penpal-runtime = { workspace = true } people-rococo-runtime = { 
workspace = true } people-westend-runtime = { workspace = true } -polkadot-omni-node-lib = { features = ["rococo-native", "westend-native"], workspace = true } -rococo-parachain-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } # Substrate -sc-chain-spec = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } # Polkadot xcm = { workspace = true, default-features = true } @@ -78,7 +76,6 @@ runtime-benchmarks = [ "people-rococo-runtime/runtime-benchmarks", "people-westend-runtime/runtime-benchmarks", "rococo-parachain-runtime/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "polkadot-omni-node-lib/try-runtime", diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 715ce3e1a03e..185b2d40833f 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 307860897aec..533d368d3b00 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" -homepage.workspace = true -repository.workspace = true 
[lints] workspace = true @@ -43,5 +41,4 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 2ff990b8d514..a4271d3fd9cc 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index b3b300d66ef3..e61c865d05fb 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -5,21 +5,19 @@ authors.workspace = true edition.workspace = true description = "Hostfunction exposing storage proof size to the runtime." 
license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -sp-externalities = { workspace = true } sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } sp-trie = { workspace = true } [dev-dependencies] +sp-state-machine = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 4bcbabc1f16c..e1ae6743335a 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Utilities to reclaim storage weight." license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -27,9 +25,9 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true } docify = { workspace = true } [dev-dependencies] -cumulus-test-runtime = { workspace = true } sp-io = { workspace = true } sp-trie = { workspace = true } +cumulus-test-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs index ab83762cc0db..c5552b0f0a33 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/tests.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -90,7 +90,7 @@ fn basic_refund() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(0)); @@ 
-130,7 +130,7 @@ fn underestimating_refund() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(0)); @@ -168,7 +168,7 @@ fn sets_to_node_storage_proof_if_higher() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(1000)); @@ -211,7 +211,7 @@ fn sets_to_node_storage_proof_if_higher() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(175)); @@ -256,7 +256,7 @@ fn does_nothing_without_extension() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, None); @@ -288,7 +288,7 @@ fn negative_refund_is_added_to_weight() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); @@ -321,7 +321,7 @@ fn test_zero_proof_size() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + 
.validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(0)); @@ -354,7 +354,7 @@ fn test_larger_pre_dispatch_proof_size() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(300)); @@ -394,7 +394,7 @@ fn test_incorporates_check_weight_unspent_weight() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); @@ -434,7 +434,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); @@ -478,7 +478,7 @@ fn test_nothing_relcaimed() { assert_eq!(get_storage_weight().total().proof_size(), 250); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); // Should return `setup_test_externalities` proof recorder value: 100. 
assert_eq!(pre, Some(0)); @@ -525,7 +525,7 @@ fn test_incorporates_check_weight_unspent_weight_reverse_order() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); @@ -567,7 +567,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); let (pre, _) = StorageWeightReclaim::(PhantomData) - .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 70cb3e607b98..cb328e2f2cc6 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." 
license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 84039b9345b2..2ca8b82001d5 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -17,14 +15,14 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } -pallet-asset-conversion = { workspace = true } sp-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } @@ -52,5 +50,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 2c72ca98f35a..33023816c718 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -12,40 +12,40 @@ workspace = true codec = { features = ["derive"], workspace = true } # Substrate -frame-system = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true 
} sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } -substrate-test-client = { workspace = true } +frame-system = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # Polkadot -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } # Cumulus +cumulus-test-runtime = { workspace = true } +cumulus-test-service = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features 
= true } -cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } -cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } -cumulus-test-runtime = { workspace = true } -cumulus-test-service = { workspace = true } [features] runtime-benchmarks = [ diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 26cf02b3dea9..863a8fa93f6f 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -167,7 +167,7 @@ pub fn generate_extrinsic_with_pair( /// Generate an extrinsic from the provided function call, origin and [`Client`]. pub fn generate_extrinsic( client: &Client, - origin: sp_keyring::Sr25519Keyring, + origin: sp_keyring::AccountKeyring, function: impl Into, ) -> UncheckedExtrinsic { generate_extrinsic_with_pair(client, origin.into(), function, None) @@ -176,8 +176,8 @@ pub fn generate_extrinsic( /// Transfer some token from one account to another using a provided test [`Client`]. pub fn transfer( client: &Client, - origin: sp_keyring::Sr25519Keyring, - dest: sp_keyring::Sr25519Keyring, + origin: sp_keyring::AccountKeyring, + dest: sp_keyring::AccountKeyring, value: Balance, ) -> UncheckedExtrinsic { let function = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index c1efa141a45d..e266b5807081 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 150838e5e96e..8117e6e69709 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -18,37 +18,37 @@ frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } -pallet-aura = { workspace = true } -pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-glutton = { workspace = true } pallet-message-queue = { workspace = true } -pallet-session = { workspace = true } pallet-sudo = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } pallet-timestamp = { workspace = true } +pallet-glutton = { workspace = true } pallet-transaction-payment = { workspace = true } +pallet-session = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } +sp-consensus-aura = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } +sp-keyring = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } +parachain-info = { workspace = true } cumulus-primitives-aura = { workspace = true } +pallet-collator-selection = { workspace = true } +cumulus-pallet-aura-ext = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { workspace = true } -parachain-info = 
{ workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -96,4 +96,3 @@ std = [ ] increment-spec-version = [] elastic-scaling = [] -experimental-ump-signals = ["cumulus-pallet-parachain-system/experimental-ump-signals"] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 43e60c1074a0..7a7fe8ffaa82 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -29,14 +29,6 @@ fn main() { .with_current_project() .enable_feature("elastic-scaling") .import_memory() - .set_file_name("wasm_binary_elastic_scaling_mvp.rs") - .build(); - - WasmBuilder::new() - .with_current_project() - .enable_feature("elastic-scaling") - .enable_feature("experimental-ump-signals") - .import_memory() .set_file_name("wasm_binary_elastic_scaling.rs") .build(); } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 4abc10276af1..b1649c410581 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -27,11 +27,6 @@ pub mod wasm_spec_version_incremented { include!(concat!(env!("OUT_DIR"), "/wasm_binary_spec_version_incremented.rs")); } -pub mod elastic_scaling_mvp { - #[cfg(feature = "std")] - include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_mvp.rs")); -} - pub mod elastic_scaling { #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index b3d92444c7d1..86a8c48bb54f 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -22,80 +22,80 @@ prometheus = { workspace = true } rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tempfile = { workspace = true } tokio = { features = ["macros"], workspace = true, 
default-features = true } tracing = { workspace = true, default-features = true } url = { workspace = true } +tempfile = { workspace = true } # Substrate frame-system = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } sc-basic-authorship = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } -sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-executor-common = { workspace = true, default-features = true } -sc-executor-wasmtime = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-genesis-builder = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } 
sp-keyring = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } substrate-test-client = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } # Polkadot -polkadot-cli = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-service = { workspace = true, default-features = true } polkadot-test-service = { workspace = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } # Cumulus cumulus-client-cli = { workspace = true, default-features = true } -cumulus-client-collator = { workspace = true, default-features = true } -cumulus-client-consensus-aura = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } 
cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } -cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } -cumulus-pallet-parachain-system = { workspace = true } +cumulus-client-collator = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } -cumulus-test-runtime = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } -parachains-common = { workspace = true, default-features = true } [dev-dependencies] -cumulus-test-client = { workspace = true } futures = { workspace = true } portpicker = { workspace = true } sp-authority-discovery = { workspace = true, default-features = true } +cumulus-test-client = { workspace = true } # Polkadot dependencies polkadot-test-service = { workspace = true } diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 5ebcc14592d7..3d4e4dca5f8d 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -116,13 +116,3 @@ pub fn get_elastic_scaling_chain_spec(id: Option) -> 
ChainSpec { .expect("WASM binary was not built, please build it!"), ) } - -/// Get the chain spec for a specific parachain ID. -pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> ChainSpec { - get_chain_spec_with_extra_endowed( - id, - Default::default(), - cumulus_test_runtime::elastic_scaling_mvp::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) -} diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index e019089e70fe..220b0449f339 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -262,16 +262,10 @@ impl SubstrateCli for TestCollatorCli { tracing::info!("Using default test service chain spec."); Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_> }, - "elastic-scaling-mvp" => { - tracing::info!("Using elastic-scaling mvp chain spec."); - Box::new(cumulus_test_service::get_elastic_scaling_mvp_chain_spec(Some( - ParaId::from(2100), - ))) as Box<_> - }, "elastic-scaling" => { tracing::info!("Using elastic-scaling chain spec."); Box::new(cumulus_test_service::get_elastic_scaling_chain_spec(Some(ParaId::from( - 2200, + 2100, )))) as Box<_> }, path => { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 2c13d20333a7..fe3cbfbbb498 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,10 +27,7 @@ use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ collators::{ lookahead::{self as aura, Params as AuraParams}, - slot_based::{ - self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, - SlotBasedBlockImportHandle, - }, + slot_based::{self as slot_based, Params as SlotBasedParams}, }, ImportQueueParams, }; @@ -134,8 +131,7 @@ pub type Client = TFullClient; /// The block-import type being used by the test service. 
-pub type ParachainBlockImport = - TParachainBlockImport, Client>, Backend>; +pub type ParachainBlockImport = TParachainBlockImport, Backend>; /// Transaction pool type used by the test service pub type TransactionPool = Arc>; @@ -188,7 +184,7 @@ pub type Service = PartialComponents< (), sc_consensus::import_queue::BasicQueue, sc_transaction_pool::TransactionPoolHandle, - (ParachainBlockImport, SlotBasedBlockImportHandle), + ParachainBlockImport, >; /// Starts a `ServiceBuilder` for a full service. @@ -221,9 +217,7 @@ pub fn new_partial( )?; let client = Arc::new(client); - let (block_import, slot_based_handle) = - SlotBasedBlockImport::new(client.clone(), client.clone()); - let block_import = ParachainBlockImport::new(block_import, backend.clone()); + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); let transaction_pool = Arc::from( sc_transaction_pool::Builder::new( @@ -266,7 +260,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: (block_import, slot_based_handle), + other: block_import, }; Ok(params) @@ -355,8 +349,7 @@ where let client = params.client.clone(); let backend = params.backend.clone(); - let block_import = params.other.0; - let slot_based_handle = params.other.1; + let block_import = params.other; let relay_chain_interface = build_relay_chain_interface( relay_chain_config, parachain_config.prometheus_registry(), @@ -374,7 +367,7 @@ where prometheus_registry.clone(), ); - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = build_network(BuildNetworkParams { parachain_config: ¶chain_config, net_config, @@ -504,11 +497,20 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), - block_import_handle: slot_based_handle, - spawner: task_manager.spawn_handle(), }; - slot_based::run::(params); + let (collation_future, 
block_builder_future) = + slot_based::run::(params); + task_manager.spawn_essential_handle().spawn( + "collation-task", + None, + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + None, + block_builder_future, + ); } else { tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); let params = AuraParams { @@ -540,6 +542,8 @@ where } } + start_network.start_network(); + Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend)) } diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index ae8cb79bb55e..8598481fae76 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -5,43 +5,41 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -array-bytes = { workspace = true } codec = { workspace = true, default-features = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } paste = { workspace = true, default-features = true } +log = { workspace = true } +impl-trait-for-tuples = { workspace = true } +array-bytes = { workspace = true } # Substrate frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } -sp-arithmetic = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, 
default-features = true } +sp-arithmetic = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Cumulus -cumulus-pallet-parachain-system = { workspace = true, default-features = true } -cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } parachains-common = { workspace = true, default-features = true } # Polkadot -polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } xcm-executor = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 677cb5465b67..bea367411359 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -14,11 +14,7 @@ Merging to it is restricted to [Backports](#backports). We are releasing multiple different things from this repository in one release, but we don't want to use the same version for everything. Thus, in the following we explain the versioning story for the crates, node and Westend & -Rococo. 
- -To easily refer to a release, it shall be named by its date in the form `stableYYMM`. Patches to stable releases are -tagged in the form of `stableYYMM-PATCH`, with `PATCH` ranging from 1 to 99. For example, the fourth patch to -`stable2409` would be `stable2409-4`. +Rococo. To easily refer to a release, it shall be named by its date in the form `stableYYMMDD`. ## Crate diff --git a/docs/contributor/container.md b/docs/contributor/container.md index e387f568d7b5..ec51b8b9d7cc 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ + docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md index 1f6252425e69..4a1a3c1f0688 100644 --- a/docs/contributor/prdoc.md +++ b/docs/contributor/prdoc.md @@ -1,88 +1,73 @@ # PRDoc -A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use -this approach to record changes on a crate level. This information is then processed by the release -team to apply the correct crate version bumps and to generate the CHANGELOG of the next release. +A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to +record changes on a crate level. This information is then processed by the release team to apply the correct crate +version bumps and to generate the CHANGELOG of the next release. ## Requirements -When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to -contain a prdoc. 
The `R0` label should only be placed for No-OP changes like correcting a typo in a -comment or CI stuff. If unsure, ping the [CODEOWNERS](../../.github/CODEOWNERS) for advice. +When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The +`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping +the [CODEOWNERS](../../.github/CODEOWNERS) for advice. -## Auto Generation +## PRDoc How-To -You can create a PrDoc by using the `/cmd prdoc` command (see args with `/cmd prdoc --help`) in a -comment on your PR. +A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one: + +1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`. +1. Open a Pull Request and get the PR number. +1. Generate the file with `prdoc generate `. The output filename will be printed. +1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example + [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas). +1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections. +1. Check your prdoc with `prdoc check -n `. This is optional since the CI will also check it. + +> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file: +> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc` + +Alternatively you can call the prdoc from PR via `/cmd prdoc` (see args with `/cmd prdoc --help`) +in a comment to PR to trigger it from CI. Options: -- `audience` The audience of whom the changes may concern. - - `runtime_dev`: Anyone building a runtime themselves. For example parachain teams, or people - providing template runtimes. Also devs using pallets, FRAME etc directly. 
These are people who - care about the protocol (WASM), not the meta-protocol (client). - - `runtime_user`: Anyone using the runtime. Can be front-end devs reading the state, exchanges - listening for events, libraries that have hard-coded pallet indices etc. Anything that would - result in an observable change to the runtime behaviour must be marked with this. - - `node_dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, - those who consume RPCs. These are people who are oblivious to the runtime changes. They only care - about the meta-protocol, not the protocol itself. - - `node_operator`: People who run the node. Think of validators, exchanges, indexer services, CI - actions. Anything that modifies how the binary behaves (its arguments, default arguments, error - messags, etc) must be marked with this. -- `bump:`: The default bump level for all crates. The PrDoc will likely need to be edited to reflect - the actual changes after generation. More details in the section below. - - `none`: There is no observable change. So to say: if someone were handed the old and the new - version of our software, it would be impossible to figure out what version is which. - - `patch`: Fixes that will never cause compilation errors if someone updates to this version. No - functionality has been changed. Should be limited to fixing bugs or No-OP implementation - changes. - - `minor`: Additions that will never cause compilation errors if someone updates to this version. - No functionality has been changed. Should be limited to adding new features. - - `major`: Anything goes. -- `force: true|false`: Whether to overwrite any existing PrDoc file. +- `pr`: The PR number to generate the PrDoc for. +- `audience`: The audience of whom the changes may concern. +- `bump`: A default bump level for all crates. + The PrDoc will likely need to be edited to reflect the actual changes after generation. +- `force`: Whether to overwrite any existing PrDoc. 
-### Example +## Pick An Audience -```bash -/cmd prdoc --audience runtime_dev --bump patch -``` +While describing a PR, the author needs to consider which audience(s) need to be addressed. +The list of valid audiences is described and documented in the JSON schema as follow: -## Local Generation +- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs. + These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol + itself. -A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps -to generate one: +- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a + pallet. These are people who care about the protocol (WASM), not the meta-protocol (client). -1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install - parity-prdoc`. -1. Open a Pull Request and get the PR number. -1. Generate the file with `prdoc generate `. The output filename will be printed. -1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example - [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas). -1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and - [SemVer](#record-semver-changes) sections. -1. Check your prdoc with `prdoc check -n `. This is optional since the CI will also check - it. +- `Node Operator`: Those who don't write any code and only run code. -> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct -> file: -> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc` +- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain. 
+ +If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and +re-phrase the changes for each audience. ## Record SemVer Changes -All published crates that got modified need to have an entry in the `crates` section of your -`PRDoc`. This entry tells the release team how to bump the crate version prior to the next release. -It is very important that this information is correct, otherwise it could break the code of -downstream teams. +All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells +the release team how to bump the crate version prior to the next release. It is very important that this information is +correct, otherwise it could break the code of downstream teams. The bump can either be `major`, `minor`, `patch` or `none`. The three first options are defined by -[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be -picked if no other applies. The `None` option is equivalent to the `R0-silent` label, but on a crate -level. Experimental and private APIs are exempt from bumping and can be broken at any time. Please -read the [Crate Section](../RELEASE.md) of the RELEASE doc about them. +[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other +applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs +are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc +about them. -> **Note**: There is currently no CI in place to sanity check this information, but should be added -> soon. +> **Note**: There is currently no CI in place to sanity check this information, but should be added soon. 
### Example @@ -96,13 +81,12 @@ crates: bump: minor ``` -It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, -while code using `frame-example` might break. +It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using +`frame-example` might break. ### Dependencies -A crate that depends on another crate will automatically inherit its `major` bumps. This means that -you do not need to bump a crate that had a SemVer breaking change only from re-exporting another -crate with a breaking change. -`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them -to the latest compatible version. +A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to +bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change. +`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest +compatible version. 
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index a856e94f42b5..0c39367eeed3 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -16,112 +16,112 @@ workspace = true [dependencies] # Needed for all FRAME-based code codec = { workspace = true } +scale-info = { workspace = true } frame = { features = [ "experimental", "runtime", ], workspace = true, default-features = true } +pallet-examples = { workspace = true } pallet-contracts = { workspace = true } pallet-default-config-example = { workspace = true, default-features = true } pallet-example-offchain-worker = { workspace = true, default-features = true } -pallet-examples = { workspace = true } -scale-info = { workspace = true } # How we build docs in rust-docs +simple-mermaid = "0.1.1" docify = { workspace = true } serde_json = { workspace = true } -simple-mermaid = "0.1.1" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. +polkadot-sdk = { features = ["runtime-full"], workspace = true, default-features = true } +node-cli = { workspace = true } +kitchensink-runtime = { workspace = true } chain-spec-builder = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true } -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true, default-features = true } -frame-support = { workspace = true } +subkey = { workspace = true, default-features = true } frame-system = { workspace = true } -kitchensink-runtime = { workspace = true } -log = { workspace = true, default-features = true } -node-cli = { workspace = true } +frame-support = { workspace = true } +frame-executive = { workspace = true } +frame-benchmarking = { workspace = true } pallet-example-authorization-tx-extension = { workspace = true, default-features = true } pallet-example-single-block-migrations = { workspace = true, default-features = true } -polkadot-sdk = { features = ["runtime-full"], workspace = true, default-features = 
true } -subkey = { workspace = true, default-features = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } # Substrate Client -sc-chain-spec = { workspace = true, default-features = true } -sc-cli = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } sc-client-db = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } -sc-consensus-beefy = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } sc-consensus-manual-seal = { workspace = true, default-features = true } sc-consensus-pow = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-rpc-api = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } substrate-wasm-builder = { workspace = true, default-features = true } # Cumulus -cumulus-client-service = { workspace = true, default-features = true } cumulus-pallet-aura-ext = { workspace = true, default-features = true } cumulus-pallet-parachain-system = { workspace = true, default-features = true } +parachain-info = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } 
cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } -parachain-info = { workspace = true, default-features = true } # Omni Node polkadot-omni-node-lib = { workspace = true, default-features = true } # Pallets and FRAME internals -pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } -pallet-asset-tx-payment = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } pallet-aura = { workspace = true, default-features = true } -pallet-authorship = { workspace = true, default-features = true } -pallet-babe = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-broker = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-multisig = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-authorship = { workspace = true, default-features = true } pallet-collective = { workspace = true, default-features = true } pallet-democracy = { workspace = true, default-features = true } -pallet-grandpa = { workspace = true, default-features = true } -pallet-multisig = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, default-features = true } pallet-nfts = { workspace = true, default-features = true } -pallet-preimage = { workspace = true, 
default-features = true } -pallet-proxy = { workspace = true, default-features = true } -pallet-referenda = { workspace = true, default-features = true } pallet-scheduler = { workspace = true, default-features = true } -pallet-skip-feeless-payment = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -pallet-uniques = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } +pallet-referenda = { workspace = true, default-features = true } +pallet-broker = { workspace = true, default-features = true } +pallet-babe = { workspace = true, default-features = true } +pallet-grandpa = { workspace = true, default-features = true } # Primitives +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-arithmetic = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-genesis-builder = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-runtime-interface = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } sp-weights = 
{ workspace = true, default-features = true } # XCM -pallet-xcm = { workspace = true } xcm = { workspace = true, default-features = true } xcm-builder = { workspace = true } xcm-docs = { workspace = true } xcm-executor = { workspace = true } xcm-simulator = { workspace = true } +pallet-xcm = { workspace = true } # runtime guides @@ -129,14 +129,13 @@ chain-spec-guide-runtime = { workspace = true, default-features = true } # Templates minimal-template-runtime = { workspace = true, default-features = true } -parachain-template-runtime = { workspace = true, default-features = true } solochain-template-runtime = { workspace = true, default-features = true } +parachain-template-runtime = { workspace = true, default-features = true } # local packages -first-pallet = { workspace = true, default-features = true } first-runtime = { workspace = true, default-features = true } +first-pallet = { workspace = true, default-features = true } [dev-dependencies] assert_cmd = "2.0.14" -cmd_lib = { workspace = true } rand = "0.8" diff --git a/docs/sdk/packages/guides/first-pallet/Cargo.toml b/docs/sdk/packages/guides/first-pallet/Cargo.toml index a1411580119d..dad5b8863494 100644 --- a/docs/sdk/packages/guides/first-pallet/Cargo.toml +++ b/docs/sdk/packages/guides/first-pallet/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -docify = { workspace = true } -frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } +docify = { workspace = true } [features] default = ["std"] diff --git a/docs/sdk/packages/guides/first-runtime/src/lib.rs b/docs/sdk/packages/guides/first-runtime/src/lib.rs index 2ab060c8c43f..7c96f5653e52 100644 --- a/docs/sdk/packages/guides/first-runtime/src/lib.rs +++ b/docs/sdk/packages/guides/first-runtime/src/lib.rs @@ -130,21 +130,23 @@ pub mod genesis_config_presets { interface::{Balance, 
MinimumBalance}, BalancesConfig, RuntimeGenesisConfig, SudoConfig, }; - use frame::deps::frame_support::build_struct_json_patch; use serde_json::Value; /// Returns a development genesis config preset. #[docify::export] pub fn development_config_genesis() -> Value { let endowment = >::get().max(1) * 1000; - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { - balances: Sr25519Keyring::iter() + balances: AccountKeyring::iter() .map(|a| (a.to_account_id(), endowment)) .collect::>(), }, - sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, - }) + sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Get the set of the available genesis config presets. diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index 3c782e4793ba..da37c11c206f 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -103,7 +103,6 @@ #[cfg(test)] mod tests { use assert_cmd::Command; - use cmd_lib::*; use rand::Rng; use sc_chain_spec::{DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET}; use sp_genesis_builder::PresetId; @@ -174,10 +173,13 @@ mod tests { println!("Building polkadot-sdk-docs-first-runtime..."); #[docify::export_content] fn build_runtime() { - run_cmd!( - cargo build --release -p $FIRST_RUNTIME - ) - .expect("Failed to run command"); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg(FIRST_RUNTIME) + .assert() + .success(); } build_runtime() } @@ -272,10 +274,14 @@ mod tests { let chain_spec_builder = find_release_binary(&CHAIN_SPEC_BUILDER).unwrap(); let runtime_path = find_wasm(PARA_RUNTIME).unwrap(); let output = "/tmp/demo-chain-spec.json"; - let runtime_str = runtime_path.to_str().unwrap(); - run_cmd!( - $chain_spec_builder -c $output create --para-id 1000 
--relay-chain dontcare -r $runtime_str named-preset development - ).expect("Failed to run command"); + Command::new(chain_spec_builder) + .args(["-c", output]) + .arg("create") + .args(["--para-id", "1000", "--relay-chain", "dontcare"]) + .args(["-r", runtime_path.to_str().unwrap()]) + .args(["named-preset", "development"]) + .assert() + .success(); std::fs::remove_file(output).unwrap(); } build_para_chain_spec_works(); diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs index 24595e445fdd..8acf19f76413 100644 --- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -57,7 +57,6 @@ //! The following example showcases a minimal pallet. #![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)] //! -//! ## Runtime //! //! A runtime is a collection of pallets that are amalgamated together. Each pallet typically has //! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime. diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs index d5cc482711ad..b7a0a648d0cf 100644 --- a/docs/sdk/src/reference_docs/chain_spec_genesis.rs +++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs @@ -174,13 +174,13 @@ //! ``` //! Here are some examples in the form of rust tests: //! ## Listing available preset names: -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_list_presets)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", list_presets)] //! ## Displaying preset with given name -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_get_preset)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", get_preset)] //! 
## Building a solo chain-spec (the default) using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_chain_spec)] //! ## Building a parachain chain-spec using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_para_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_para_chain_spec)] //! //! [`RuntimeGenesisConfig`]: //! chain_spec_guide_runtime::runtime::RuntimeGenesisConfig diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index 925cb7bb2e65..07c0342f5fbe 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -10,8 +10,8 @@ edition.workspace = true publish = false [dependencies] -codec = { workspace = true } docify = { workspace = true } +codec = { workspace = true } frame-support = { workspace = true } scale-info = { workspace = true } serde = { workspace = true } @@ -31,18 +31,17 @@ pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-application-crypto = { features = ["serde"], workspace = true } -sp-core = { workspace = true } sp-genesis-builder = { workspace = true } -sp-keyring = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] 
chain-spec-builder = { workspace = true, default-features = true } -cmd_lib = { workspace = true } sc-chain-spec = { workspace = true, default-features = true } [features] diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs index 5432d37e907d..5918f2b8ccd5 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs @@ -25,7 +25,7 @@ use alloc::vec; use frame_support::build_struct_json_patch; use serde_json::{json, to_string, Value}; use sp_application_crypto::Ss58Codec; -use sp_keyring::Sr25519Keyring; +use sp_keyring::AccountKeyring; /// A demo preset with strings only. pub const PRESET_1: &str = "preset_1"; @@ -70,7 +70,7 @@ fn preset_2() -> Value { some_integer: 200, some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, - bar: BarConfig { initial_account: Some(Sr25519Keyring::Ferdie.public().into()) }, + bar: BarConfig { initial_account: Some(AccountKeyring::Ferdie.public().into()) }, }) } @@ -80,7 +80,7 @@ fn preset_2() -> Value { fn preset_3() -> Value { json!({ "bar": { - "initialAccount": Sr25519Keyring::Alice.public().to_ss58check(), + "initialAccount": AccountKeyring::Alice.public().to_ss58check(), }, "foo": { "someEnum": FooEnum::Data1( diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs index b773af24de80..c2fe5a6727e6 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs @@ -1,192 +1,194 @@ -use cmd_lib::*; use serde_json::{json, Value}; -use std::str; +use std::{process::Command, str}; -fn wasm_file_path() -> &'static str { - chain_spec_guide_runtime::runtime::WASM_BINARY_PATH - .expect("chain_spec_guide_runtime wasm 
should exist. qed") -} +const WASM_FILE_PATH: &str = + "../../../../../target/release/wbuild/chain-spec-guide-runtime/chain_spec_guide_runtime.wasm"; const CHAIN_SPEC_BUILDER_PATH: &str = "../../../../../target/release/chain-spec-builder"; -macro_rules! bash( - ( chain-spec-builder $($a:tt)* ) => {{ - let path = get_chain_spec_builder_path(); - spawn_with_output!( - $path $($a)* - ) - .expect("a process running. qed") - .wait_with_output() - .expect("to get output. qed.") - - }} -); - fn get_chain_spec_builder_path() -> &'static str { - run_cmd!( - cargo build --release -p staging-chain-spec-builder --bin chain-spec-builder - ) - .expect("Failed to execute command"); + // dev-dependencies do not build binary. So let's do the naive work-around here: + let _ = std::process::Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg("staging-chain-spec-builder") + .arg("--bin") + .arg("chain-spec-builder") + .status() + .expect("Failed to execute command"); CHAIN_SPEC_BUILDER_PATH } -#[docify::export_content] -fn cmd_list_presets(runtime_path: &str) -> String { - bash!( - chain-spec-builder list-presets -r $runtime_path - ) -} - #[test] +#[docify::export] fn list_presets() { - let output: serde_json::Value = - serde_json::from_slice(cmd_list_presets(wasm_file_path()).as_bytes()).unwrap(); - assert_eq!( - output, - json!({ - "presets":[ - "preset_1", - "preset_2", - "preset_3", - "preset_4", - "preset_invalid" - ] - }), - "Output did not match expected" - ); -} + let output = Command::new(get_chain_spec_builder_path()) + .arg("list-presets") + .arg("-r") + .arg(WASM_FILE_PATH) + .output() + .expect("Failed to execute command"); -#[docify::export_content] -fn cmd_get_preset(runtime_path: &str) -> String { - bash!( - chain-spec-builder display-preset -r $runtime_path -p preset_2 - ) + let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + + let expected_output = json!({ + "presets":[ + "preset_1", + "preset_2", + "preset_3", 
+ "preset_4", + "preset_invalid" + ] + }); + assert_eq!(output, expected_output, "Output did not match expected"); } #[test] +#[docify::export] fn get_preset() { - let output: serde_json::Value = - serde_json::from_slice(cmd_get_preset(wasm_file_path()).as_bytes()).unwrap(); - assert_eq!( - output, - json!({ - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } - }, - "someInteger": 200 - }, - }), - "Output did not match expected" - ); -} + let output = Command::new(get_chain_spec_builder_path()) + .arg("display-preset") + .arg("-r") + .arg(WASM_FILE_PATH) + .arg("-p") + .arg("preset_2") + .output() + .expect("Failed to execute command"); -#[docify::export_content] -fn cmd_generate_chain_spec(runtime_path: &str) -> String { - bash!( - chain-spec-builder -c /dev/stdout create -r $runtime_path named-preset preset_2 - ) + let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + + //note: copy of chain_spec_guide_runtime::preset_2 + let expected_output = json!({ + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 + }, + }); + assert_eq!(output, expected_output, "Output did not match expected"); } #[test] +#[docify::export] fn generate_chain_spec() { - let mut output: serde_json::Value = - serde_json::from_slice(cmd_generate_chain_spec(wasm_file_path()).as_bytes()).unwrap(); + let output = Command::new(get_chain_spec_builder_path()) + .arg("-c") + .arg("/dev/stdout") + .arg("create") + .arg("-r") + .arg(WASM_FILE_PATH) + .arg("named-preset") + .arg("preset_2") + .output() + .expect("Failed to execute command"); + + let mut output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + + //remove code field for better readability if let Some(code) = 
output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - assert_eq!( - output, - json!({ - "name": "Custom", - "id": "custom", - "chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } - }, - "someInteger": 200 + + let expected_output = json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" } - } + }, + "someInteger": 200 } } - }), - "Output did not match expected" - ); -} - -#[docify::export_content] -fn cmd_generate_para_chain_spec(runtime_path: &str) -> String { - bash!( - chain-spec-builder -c /dev/stdout create -c polkadot -p 1000 -r $runtime_path named-preset preset_2 - ) + } + } + }); + assert_eq!(output, expected_output, "Output did not match expected"); } #[test] +#[docify::export] fn generate_para_chain_spec() { - let mut output: serde_json::Value = - serde_json::from_slice(cmd_generate_para_chain_spec(wasm_file_path()).as_bytes()).unwrap(); + let output = Command::new(get_chain_spec_builder_path()) + .arg("-c") + .arg("/dev/stdout") + .arg("create") + .arg("-c") + .arg("polkadot") + .arg("-p") + .arg("1000") + .arg("-r") + .arg(WASM_FILE_PATH) + .arg("named-preset") + .arg("preset_2") + .output() + .expect("Failed to execute command"); + + let mut output: 
serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + + //remove code field for better readability if let Some(code) = output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - assert_eq!( - output, - json!({ - "name": "Custom", - "id": "custom", - "chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "relay_chain": "polkadot", - "para_id": 1000, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + + let expected_output = json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "relay_chain": "polkadot", + "para_id": 1000, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } - }, - "someInteger": 200 - } + "someInteger": 200 } } - }}), - "Output did not match expected" - ); + } + } + }); + assert_eq!(output, expected_output, "Output did not match expected"); } #[test] -#[docify::export_content] +#[docify::export] fn preset_4_json() { assert_eq!( chain_spec_guide_runtime::presets::preset_4(), diff --git a/docs/sdk/src/reference_docs/omni_node.rs b/docs/sdk/src/reference_docs/omni_node.rs index 150755fb29a2..44d63704a458 100644 --- a/docs/sdk/src/reference_docs/omni_node.rs +++ b/docs/sdk/src/reference_docs/omni_node.rs @@ -177,25 +177,9 @@ //! [This](https://github.com/paritytech/polkadot-sdk/issues/5565) future improvement to OmniNode //! 
aims to make such checks automatic. //! -//! ### Runtime conventions -//! -//! The Omni Node needs to make some assumptions about the runtime. During startup, the node fetches -//! the runtime metadata and asserts that the runtime represents a compatible parachain. -//! The checks are best effort and will generate warning level logs in the Omni Node log file on -//! failure. -//! -//! The list of checks may evolve in the future and for now only few rules are implemented: -//! * runtimes must define a type for [`cumulus-pallet-parachain-system`], which is recommended to -//! be named as `ParachainSystem`. -//! * runtimes must define a type for [`frame-system`] pallet, which is recommended to be named as -//! `System`. The configured [`block number`] here will be used by Omni Node to configure AURA -//! accordingly. //! //! [`templates`]: crate::polkadot_sdk::templates //! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk-parachain-template //! [`--dev-block-time`]: polkadot_omni_node_lib::cli::Cli::dev_block_time //! [`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node //! [`chain-spec-builder`]: https://crates.io/crates/staging-chain-spec-builder -//! [`cumulus-pallet-parachain-system`]: https://docs.rs/cumulus-pallet-parachain-system/latest/cumulus_pallet_parachain_system/ -//! [`frame-system`]: https://docs.rs/frame-system/latest/frame_system/ -//! 
[`block number`]: https://docs.rs/frame-system/latest/frame_system/pallet/storage_types/struct.Number.html diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index ded8157ad90e..3a939464868f 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -20,8 +20,6 @@ authors.workspace = true edition.workspace = true version = "6.0.0" default-run = "polkadot" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -46,10 +44,10 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo [dev-dependencies] assert_cmd = { workspace = true } nix = { features = ["signal"], workspace = true } -polkadot-core-primitives = { workspace = true, default-features = true } -substrate-rpc-client = { workspace = true, default-features = true } tempfile = { workspace = true } tokio = { workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } [build-dependencies] substrate-build-script-utils = { workspace = true, default-features = true } diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 6909d142b3a6..da37f6062c57 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -22,27 +20,27 @@ crate-type = ["cdylib", "rlib"] [dependencies] cfg-if = { workspace = true } clap = { features = ["derive"], optional = true, workspace = true } -futures = { workspace = true } log = { workspace = true, default-features = true } +thiserror = { workspace = true } +futures = { workspace = true } pyroscope = { optional = true, workspace = true } pyroscope_pprofrs = { optional = true, workspace = true } -thiserror = { workspace = true } polkadot-service = { optional = true, workspace = true } +sp-core = 
{ workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } frame-benchmarking-cli = { optional = true, workspace = true, default-features = true } +sc-cli = { optional = true, workspace = true, default-features = true } +sc-service = { optional = true, workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -sc-cli = { optional = true, workspace = true, default-features = true } +sc-tracing = { optional = true, workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { optional = true, workspace = true, default-features = true } sc-storage-monitor = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } -sc-tracing = { optional = true, workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [build-dependencies] diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 1fb14e9d58e7..42ca27953738 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -5,17 +5,15 @@ description = "Core Polkadot types used by Relay Chains and parachains." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index ba712a89613b..969742c5bb0a 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,24 +5,22 @@ description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["derive", "std"], workspace = true } -novelpoly = { workspace = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +novelpoly = { workspace = true } +codec = { features = ["derive", "std"], workspace = true } sp-core = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] -criterion = { features = ["cargo_bench_support"], workspace = true } quickcheck = { workspace = true } +criterion = { features = ["cargo_bench_support"], workspace = true } [[bench]] name = "scaling_with_validators" diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index 5f1c2bda4058..6f451f0319b2 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -10,10 +10,10 @@ 
publish = false workspace = true [dependencies] -honggfuzz = { workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } +honggfuzz = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } [[bin]] name = "reconstruct" diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index 0ecb0b70515b..e909fdd29a75 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -90,4 +90,4 @@ and issue statement or initiate dispute. - **Assignment delay tranches**. Approval voting is designed such that validators assigned to check a specific candidate are split up into equal delay tranches (0.5 seconds each). All validators checks are ordered by the delay tranche index. Early tranches of validators have the opportunity to check the candidate first before later tranches -that act as backups in case of no shows. +that act as backups in case of no shows. diff --git a/polkadot/grafana/parachains/status.json b/polkadot/grafana/parachains/status.json index 22250967848d..5942cbdf4479 100644 --- a/polkadot/grafana/parachains/status.json +++ b/polkadot/grafana/parachains/status.json @@ -1405,7 +1405,7 @@ "type": "prometheus", "uid": "$data_source" }, - "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as backups in case of no shows.", + "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. 
Early tranches of validators will check the candidate first and later tranches act as backups in case of no shows.", "gridPos": { "h": 9, "w": 18, diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index eb9568cc22bc..777458673f5b 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -5,14 +5,11 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["bit-vec", "derive"], workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } @@ -20,15 +17,16 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -schnellru = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } thiserror = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +schnellru = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } +assert_matches = { workspace = true } rstest = { workspace = true } sp-keyring = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, features = ["test"] } diff --git 
a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml index a3b3e97da497..3a98cce80e92 100644 --- a/polkadot/node/core/approval-voting-parallel/Cargo.toml +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem running approval work in parallel" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,38 +17,38 @@ gum = { workspace = true } itertools = { workspace = true } thiserror = { workspace = true } -polkadot-approval-distribution = { workspace = true, default-features = true } polkadot-node-core-approval-voting = { workspace = true, default-features = true } -polkadot-node-metrics = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-approval-distribution = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = false } -sp-application-crypto = { workspace = true, default-features = false, features = ["full_crypto"] } sp-consensus = { workspace = true, default-features = false } sp-consensus-slots = { workspace = true, default-features = false } +sp-application-crypto = { workspace = true, default-features = false, features = 
["full_crypto"] } sp-runtime = { workspace = true, default-features = false } rand = { workspace = true } -rand_chacha = { workspace = true } rand_core = { workspace = true } +rand_chacha = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } async-trait = { workspace = true } -kvdb-memorydb = { workspace = true } -log = { workspace = true, default-features = true } parking_lot = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-tracing = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true, default-features = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } polkadot-primitives-test-helpers = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } polkadot-subsystem-bench = { workspace = true, default-features = true } schnorrkel = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-tracing = { workspace = true } diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 2c292ba5efcb..f9754d2babc9 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -5,57 +5,55 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -async-trait = { workspace = true } -bitvec = { features = ["alloc"], 
workspace = true } -codec = { features = ["bit-vec", "derive"], workspace = true } -derive_more = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } gum = { workspace = true, default-features = true } -itertools = { workspace = true } -kvdb = { workspace = true } -merlin = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } schnellru = { workspace = true } +merlin = { workspace = true, default-features = true } schnorrkel = { workspace = true, default-features = true } +kvdb = { workspace = true } +derive_more = { workspace = true, default-features = true } thiserror = { workspace = true } +itertools = { workspace = true } +async-trait = { workspace = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } sc-keystore = { workspace = true } -sp-application-crypto = { features = ["full_crypto"], workspace = true } sp-consensus = { workspace = true } sp-consensus-slots = { workspace = true } +sp-application-crypto = { features = ["full_crypto"], workspace = true } sp-runtime = { workspace = true } # rand_core should match schnorrkel -rand = { workspace = true, default-features = true } -rand_chacha = { workspace = true, default-features = true } rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } async-trait = { workspace = true } -kvdb-memorydb = { workspace = true } 
-log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } -polkadot-primitives-test-helpers = { workspace = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +log = { workspace = true, default-features = true } sp-tracing = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-subsystem-bench = { workspace = true } diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 69278868fa3d..372dd49803cb 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -264,8 +264,8 @@ fn add_block_entry_adds_child() { fn canonicalize_works() { let (mut db, store) = make_db(); - // -> B1 -> C1 -> D1 -> E1 - // A -> B2 -> C2 -> D2 -> E2 + // -> B1 -> C1 -> D1 + // A -> B2 -> C2 -> D2 // // We'll canonicalize C1. Everything except D1 should disappear. 
// @@ -293,22 +293,18 @@ fn canonicalize_works() { let block_hash_c2 = Hash::repeat_byte(5); let block_hash_d1 = Hash::repeat_byte(6); let block_hash_d2 = Hash::repeat_byte(7); - let block_hash_e1 = Hash::repeat_byte(8); - let block_hash_e2 = Hash::repeat_byte(9); let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis); let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a); let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a); let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1); let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1); - let candidate_receipt_e1 = make_candidate(ParaId::from(6_u32), block_hash_e1); let cand_hash_1 = candidate_receipt_genesis.hash(); let cand_hash_2 = candidate_receipt_a.hash(); let cand_hash_3 = candidate_receipt_b.hash(); let cand_hash_4 = candidate_receipt_b1.hash(); let cand_hash_5 = candidate_receipt_c1.hash(); - let cand_hash_6 = candidate_receipt_e1.hash(); let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new()); let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new()); @@ -330,12 +326,6 @@ fn canonicalize_works() { let block_entry_d2 = make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]); - let block_entry_e1 = - make_block_entry(block_hash_e1, block_hash_d1, 5, vec![(CoreIndex(0), cand_hash_6)]); - - let block_entry_e2 = - make_block_entry(block_hash_e2, block_hash_d2, 5, vec![(CoreIndex(0), cand_hash_6)]); - let candidate_info = { let mut candidate_info = HashMap::new(); candidate_info.insert( @@ -355,8 +345,6 @@ fn canonicalize_works() { candidate_info .insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); - candidate_info - .insert(cand_hash_6, NewCandidateInfo::new(candidate_receipt_e1, GroupIndex(6), None)); candidate_info }; @@ -369,8 +357,6 @@ fn canonicalize_works() { block_entry_c2.clone(), 
block_entry_d1.clone(), block_entry_d2.clone(), - block_entry_e1.clone(), - block_entry_e2.clone(), ]; let mut overlay_db = OverlayedBackend::new(&db); @@ -452,7 +438,7 @@ fn canonicalize_works() { assert_eq!( load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), - StoredBlockRange(4, 6) + StoredBlockRange(4, 5) ); check_candidates_in_store(vec![ @@ -461,7 +447,6 @@ fn canonicalize_works() { (cand_hash_3, Some(vec![block_hash_d1])), (cand_hash_4, Some(vec![block_hash_d1])), (cand_hash_5, None), - (cand_hash_6, Some(vec![block_hash_e1])), ]); check_blocks_in_store(vec![ @@ -471,37 +456,6 @@ fn canonicalize_works() { (block_hash_c1, None), (block_hash_c2, None), (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), - (block_hash_e1, Some(vec![cand_hash_6])), - (block_hash_d2, None), - ]); - - let mut overlay_db = OverlayedBackend::new(&db); - canonicalize(&mut overlay_db, 4, block_hash_d1).unwrap(); - let write_ops = overlay_db.into_write_ops(); - db.write(write_ops).unwrap(); - - assert_eq!( - load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), - StoredBlockRange(5, 6) - ); - - check_candidates_in_store(vec![ - (cand_hash_1, None), - (cand_hash_2, None), - (cand_hash_3, None), - (cand_hash_4, None), - (cand_hash_5, None), - (cand_hash_6, Some(vec![block_hash_e1])), - ]); - - check_blocks_in_store(vec![ - (block_hash_a, None), - (block_hash_b1, None), - (block_hash_b2, None), - (block_hash_c1, None), - (block_hash_c2, None), - (block_hash_d1, None), - (block_hash_e1, Some(vec![cand_hash_6])), (block_hash_d2, None), ]); } diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 7cea22d1a6a7..2176cc7675be 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1582,9 +1582,8 @@ async fn handle_actions< session_info_provider, ) .await?; - for message in messages.into_iter() { - 
approval_voting_sender.send_unbounded_message(message); - } + + approval_voting_sender.send_messages(messages.into_iter()).await; let next_actions: Vec = next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); @@ -1669,7 +1668,6 @@ async fn distribution_messages_for_activation SubsystemResult<()> { let range = match overlay_db.load_stored_blocks()? { None => return Ok(()), - Some(range) if range.0 > canon_number => return Ok(()), + Some(range) if range.0 >= canon_number => return Ok(()), Some(range) => range, }; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index be569a1de3ec..099ab419dfbf 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -4459,114 +4459,6 @@ async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( assert!(our_assignment.triggered()); } -// Builds a chain with a fork where both relay blocks include the same candidate. 
-async fn build_chain_with_block_with_two_candidates( - block_hash1: Hash, - slot: Slot, - sync_oracle_handle: TestSyncOracleHandle, - candidate_receipt: Vec, -) -> (ChainBuilder, SessionInfo) { - let validators = vec![ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Eve, - ]; - let session_info = SessionInfo { - validator_groups: IndexedVec::>::from(vec![ - vec![ValidatorIndex(0), ValidatorIndex(1)], - vec![ValidatorIndex(2)], - vec![ValidatorIndex(3), ValidatorIndex(4)], - ]), - ..session_info(&validators) - }; - - let candidates = Some( - candidate_receipt - .iter() - .enumerate() - .map(|(i, receipt)| (receipt.clone(), CoreIndex(i as u32), GroupIndex(i as u32))) - .collect(), - ); - let mut chain_builder = ChainBuilder::new(); - - chain_builder - .major_syncing(sync_oracle_handle.is_major_syncing.clone()) - .add_block( - block_hash1, - ChainBuilder::GENESIS_HASH, - 1, - BlockConfig { - slot, - candidates: candidates.clone(), - session_info: Some(session_info.clone()), - end_syncing: true, - }, - ); - (chain_builder, session_info) -} - -async fn setup_overseer_with_blocks_with_two_assignments_triggered( - virtual_overseer: &mut VirtualOverseer, - store: TestStore, - clock: &Arc, - sync_oracle_handle: TestSyncOracleHandle, -) { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { - rx.send(Ok(0)).unwrap(); - } - ); - - let block_hash = Hash::repeat_byte(0x01); - let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); - candidate_receipt.commitments_hash = candidate_commitments.hash(); - let candidate_hash = candidate_receipt.hash(); - - let mut candidate_commitments2 = CandidateCommitments::default(); - candidate_commitments2.processed_downward_messages = 3; - let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); - 
candidate_receipt2.commitments_hash = candidate_commitments2.hash(); - let candidate_hash2 = candidate_receipt2.hash(); - - let slot = Slot::from(1); - let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( - block_hash, - slot, - sync_oracle_handle, - vec![candidate_receipt, candidate_receipt2], - ) - .await; - chain_builder.build(virtual_overseer).await; - - assert!(!clock.inner.lock().current_wakeup_is(1)); - clock.inner.lock().wakeup_all(1); - - assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); - clock.inner.lock().wakeup_all(slot_to_tick(slot)); - - futures_timer::Delay::new(Duration::from_millis(200)).await; - - clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); - - assert_eq!(clock.inner.lock().wakeups.len(), 0); - - futures_timer::Delay::new(Duration::from_millis(200)).await; - - let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); - let our_assignment = - candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); - assert!(our_assignment.triggered()); - - let candidate_entry = store.load_candidate_entry(&candidate_hash2).unwrap().unwrap(); - let our_assignment = - candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); - assert!(our_assignment.triggered()); -} - // Tests that for candidates that we did not approve yet, for which we triggered the assignment and // the approval work we restart the work to approve it. #[test] @@ -5028,212 +4920,6 @@ fn subsystem_sends_pending_approvals_on_approval_restart() { }); } -// Test that after restart approvals are sent after all assignments have been distributed. 
-#[test] -fn subsystem_sends_assignment_approval_in_correct_order_on_approval_restart() { - let assignment_criteria = Box::new(MockAssignmentCriteria( - || { - let mut assignments = HashMap::new(); - - let _ = assignments.insert( - CoreIndex(0), - approval_db::v2::OurAssignment { - cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { - core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(), - }), - tranche: 0, - validator_index: ValidatorIndex(0), - triggered: false, - } - .into(), - ); - - let _ = assignments.insert( - CoreIndex(1), - approval_db::v2::OurAssignment { - cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay { - core_index: CoreIndex(1), - }), - tranche: 0, - validator_index: ValidatorIndex(0), - triggered: false, - } - .into(), - ); - assignments - }, - |_| Ok(0), - )); - let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); - let store = config.backend(); - let store_clone = config.backend(); - - test_harness(config, |test_harness| async move { - let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; - - setup_overseer_with_blocks_with_two_assignments_triggered( - &mut virtual_overseer, - store, - &clock, - sync_oracle_handle, - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( - _, - _, - )) => { - } - ); - - recover_available_data(&mut virtual_overseer).await; - fetch_validation_code(&mut virtual_overseer).await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( - _, - _ - )) => { - } - ); - - recover_available_data(&mut virtual_overseer).await; - fetch_validation_code(&mut virtual_overseer).await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - 
AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { - exec_kind, - response_sender, - .. - }) if exec_kind == PvfExecKind::Approval => { - response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) - .unwrap(); - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { - exec_kind, - response_sender, - .. - }) if exec_kind == PvfExecKind::Approval => { - response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) - .unwrap(); - } - ); - - // Configure a big coalesce number, so that the signature is cached instead of being sent to - // approval-distribution. - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { - let _ = sender.send(Ok(ApprovalVotingParams { - max_approval_coalesce_count: 2, - })); - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { - let _ = sender.send(Ok(ApprovalVotingParams { - max_approval_coalesce_count: 2, - })); - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) - ); - - // Assert that there are no more messages being sent by the subsystem - assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); - - virtual_overseer - }); - - let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); - // On restart we should first distribute all assignments covering a coalesced approval. 
- test_harness(config, |test_harness| async move { - let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { - rx.send(Ok(0)).unwrap(); - } - ); - - let block_hash = Hash::repeat_byte(0x01); - let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); - candidate_receipt.commitments_hash = candidate_commitments.hash(); - - let mut candidate_commitments2 = CandidateCommitments::default(); - candidate_commitments2.processed_downward_messages = 3; - let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); - candidate_receipt2.commitments_hash = candidate_commitments2.hash(); - - let slot = Slot::from(1); - - clock.inner.lock().set_tick(slot_to_tick(slot + 2)); - let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( - block_hash, - slot, - sync_oracle_handle, - vec![candidate_receipt.into(), candidate_receipt2.into()], - ) - .await; - chain_builder.build(&mut virtual_overseer).await; - - futures_timer::Delay::new(Duration::from_millis(2000)).await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( - _, - )) => { - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( - _, - _, - )) => { - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( - _, - _, - )) => { - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(approval)) => { - assert_eq!(approval.candidate_indices.count_ones(), 2); - } 
- ); - - // Assert that there are no more messages being sent by the subsystem - assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); - - virtual_overseer - }); -} - // Test we correctly update the timer when we mark the beginning of gathering assignments. #[test] fn test_gathering_assignments_statements() { diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index f3bd1f09caea..1d14e4cfba37 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -5,38 +5,36 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -bitvec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -gum = { workspace = true, default-features = true } kvdb = { workspace = true } thiserror = { workspace = true } +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } sp-consensus = { workspace = true } [dev-dependencies] +log = { workspace = true, default-features = true } assert_matches = { workspace = true } kvdb-memorydb = { workspace = true } -log = { workspace = true, default-features = true } sp-tracing = { workspace = true } -parking_lot = { workspace = 
true, default-features = true } -polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } sp-keyring = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index be829a84ee6e..cd1acf9daa93 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -5,37 +5,35 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -bitvec = { features = ["alloc"], workspace = true } -fatality = { workspace = true } futures = { workspace = true } -gum = { workspace = true, default-features = true } -polkadot-erasure-coding = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } polkadot-statement-table = { workspace = true, default-features = true } -schnellru = { workspace = true } -sp-keystore = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } +fatality = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -futures = { features = ["thread-pool"], workspace = true } -polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } -polkadot-primitives-test-helpers = { workspace = true } -rstest = { workspace = true } -sc-keystore = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-keyring = { workspace = true, 
default-features = true } +sc-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index e1852be826f4..e09d8425f78a 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -105,9 +105,6 @@ pub enum Error { #[error("Availability store error")] StoreAvailableData(#[source] StoreAvailableDataError), - - #[error("Runtime API returned None for executor params")] - MissingExecutorParams, } /// Utility for eating top level errors and log them. diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 8b54a8b5907b..30121418a2fd 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -93,15 +93,17 @@ use polkadot_node_subsystem::{ RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, - SubsystemError, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ self as util, - backing_implicit_view::View as ImplicitView, - request_claim_queue, request_disabled_validators, request_session_executor_params, - request_session_index_for_child, request_validator_groups, request_validators, - runtime::{self, request_min_backing_votes, ClaimQueueSnapshot}, + backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, + executor_params_at_relay_parent, 
request_from_runtime, request_session_index_for_child, + request_validator_groups, request_validators, + runtime::{ + self, fetch_claim_queue, prospective_parachains_mode, request_min_backing_votes, + ClaimQueueSnapshot, ProspectiveParachainsMode, + }, Validator, }; use polkadot_parachain_primitives::primitives::IsSystem; @@ -109,7 +111,7 @@ use polkadot_primitives::{ node_features::FeatureIndex, vstaging::{ BackedCandidate, CandidateReceiptV2 as CandidateReceipt, - CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, }, CandidateCommitments, CandidateHash, CoreIndex, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, SessionIndex, @@ -122,10 +124,10 @@ use polkadot_statement_table::{ SignedStatement as TableSignedStatement, Statement as TableStatement, Summary as TableSummary, }, - Context as TableContextTrait, Table, + Config as TableConfig, Context as TableContextTrait, Table, }; use sp_keystore::KeystorePtr; -use util::runtime::request_node_features; +use util::runtime::{get_disabled_validators_with_fallback, request_node_features}; mod error; @@ -212,12 +214,11 @@ where } struct PerRelayParentState { + prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The node features. - node_features: NodeFeatures, - /// The executor parameters. - executor_params: Arc, + /// Session index. + session_index: SessionIndex, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. @@ -254,193 +255,76 @@ struct PerCandidateState { relay_parent: Hash, } -/// A cache for storing data per-session to reduce repeated -/// runtime API calls and avoid redundant computations. 
-struct PerSessionCache { - /// Cache for storing validators list, retrieved from the runtime. - validators_cache: LruMap>>, - /// Cache for storing node features, retrieved from the runtime. - node_features_cache: LruMap>, - /// Cache for storing executor parameters, retrieved from the runtime. - executor_params_cache: LruMap>, - /// Cache for storing the minimum backing votes threshold, retrieved from the runtime. - minimum_backing_votes_cache: LruMap, - /// Cache for storing validator-to-group mappings, computed from validator groups. - validator_to_group_cache: - LruMap>>>, +enum ActiveLeafState { + // If prospective-parachains is disabled, one validator may only back one candidate per + // paraid. + ProspectiveParachainsDisabled { seconded: HashSet }, + ProspectiveParachainsEnabled { max_candidate_depth: usize, allowed_ancestry_len: usize }, } -impl Default for PerSessionCache { - /// Creates a new `PerSessionCache` with a default capacity. - fn default() -> Self { - Self::new(2) - } -} - -impl PerSessionCache { - /// Creates a new `PerSessionCache` with a given capacity. - fn new(capacity: u32) -> Self { - PerSessionCache { - validators_cache: LruMap::new(ByLength::new(capacity)), - node_features_cache: LruMap::new(ByLength::new(capacity)), - executor_params_cache: LruMap::new(ByLength::new(capacity)), - minimum_backing_votes_cache: LruMap::new(ByLength::new(capacity)), - validator_to_group_cache: LruMap::new(ByLength::new(capacity)), - } - } - - /// Gets validators from the cache or fetches them from the runtime if not present. - async fn validators( - &mut self, - session_index: SessionIndex, - parent: Hash, - sender: &mut impl overseer::SubsystemSender, - ) -> Result>, RuntimeApiError> { - // Try to get the validators list from the cache. - if let Some(validators) = self.validators_cache.get(&session_index) { - return Ok(Arc::clone(validators)); - } - - // Fetch the validators list from the runtime since it was not in the cache. 
- let validators: Vec = - request_validators(parent, sender).await.await.map_err(|err| { - RuntimeApiError::Execution { runtime_api_name: "Validators", source: Arc::new(err) } - })??; - - // Wrap the validators list in an Arc to avoid a deep copy when storing it in the cache. - let validators = Arc::new(validators); - - // Cache the fetched validators list for future use. - self.validators_cache.insert(session_index, Arc::clone(&validators)); - - Ok(validators) - } - - /// Gets the node features from the cache or fetches it from the runtime if not present. - async fn node_features( - &mut self, - session_index: SessionIndex, - parent: Hash, - sender: &mut impl overseer::SubsystemSender, - ) -> Result, Error> { - // Try to get the node features from the cache. - if let Some(node_features) = self.node_features_cache.get(&session_index) { - return Ok(node_features.clone()); +impl ActiveLeafState { + fn new(mode: ProspectiveParachainsMode) -> Self { + match mode { + ProspectiveParachainsMode::Disabled => + Self::ProspectiveParachainsDisabled { seconded: HashSet::new() }, + ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } => + Self::ProspectiveParachainsEnabled { max_candidate_depth, allowed_ancestry_len }, } - - // Fetch the node features from the runtime since it was not in the cache. - let node_features: Option = - request_node_features(parent, session_index, sender).await?; - - // Cache the fetched node features for future use. - self.node_features_cache.insert(session_index, node_features.clone()); - - Ok(node_features) } - /// Gets the executor parameters from the cache or - /// fetches them from the runtime if not present. - async fn executor_params( - &mut self, - session_index: SessionIndex, - parent: Hash, - sender: &mut impl overseer::SubsystemSender, - ) -> Result, RuntimeApiError> { - // Try to get the executor parameters from the cache. 
- if let Some(executor_params) = self.executor_params_cache.get(&session_index) { - return Ok(Arc::clone(executor_params)); + fn add_seconded_candidate(&mut self, para_id: ParaId) { + if let Self::ProspectiveParachainsDisabled { seconded } = self { + seconded.insert(para_id); } - - // Fetch the executor parameters from the runtime since it was not in the cache. - let executor_params = request_session_executor_params(parent, session_index, sender) - .await - .await - .map_err(|err| RuntimeApiError::Execution { - runtime_api_name: "SessionExecutorParams", - source: Arc::new(err), - })?? - .ok_or_else(|| RuntimeApiError::Execution { - runtime_api_name: "SessionExecutorParams", - source: Arc::new(Error::MissingExecutorParams), - })?; - - // Wrap the executor parameters in an Arc to avoid a deep copy when storing it in the cache. - let executor_params = Arc::new(executor_params); - - // Cache the fetched executor parameters for future use. - self.executor_params_cache.insert(session_index, Arc::clone(&executor_params)); - - Ok(executor_params) } +} - /// Gets the minimum backing votes threshold from the - /// cache or fetches it from the runtime if not present. - async fn minimum_backing_votes( - &mut self, - session_index: SessionIndex, - parent: Hash, - sender: &mut impl overseer::SubsystemSender, - ) -> Result { - // Try to get the value from the cache. - if let Some(minimum_backing_votes) = self.minimum_backing_votes_cache.get(&session_index) { - return Ok(*minimum_backing_votes); +impl From<&ActiveLeafState> for ProspectiveParachainsMode { + fn from(state: &ActiveLeafState) -> Self { + match *state { + ActiveLeafState::ProspectiveParachainsDisabled { .. } => + ProspectiveParachainsMode::Disabled, + ActiveLeafState::ProspectiveParachainsEnabled { + max_candidate_depth, + allowed_ancestry_len, + } => ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len }, } - - // Fetch the value from the runtime since it was not in the cache. 
- let minimum_backing_votes = request_min_backing_votes(parent, session_index, sender) - .await - .map_err(|err| RuntimeApiError::Execution { - runtime_api_name: "MinimumBackingVotes", - source: Arc::new(err), - })?; - - // Cache the fetched value for future use. - self.minimum_backing_votes_cache.insert(session_index, minimum_backing_votes); - - Ok(minimum_backing_votes) - } - - /// Gets or computes the validator-to-group mapping for a session. - fn validator_to_group( - &mut self, - session_index: SessionIndex, - validators: &[ValidatorId], - validator_groups: &[Vec], - ) -> Arc>> { - let validator_to_group = self - .validator_to_group_cache - .get_or_insert(session_index, || { - let mut vector = vec![None; validators.len()]; - - for (group_idx, validator_group) in validator_groups.iter().enumerate() { - for validator in validator_group { - vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); - } - } - - Arc::new(IndexedVec::<_, _>::from(vector)) - }) - .expect("Just inserted"); - - Arc::clone(validator_to_group) } } /// The state of the subsystem. struct State { /// The utility for managing the implicit and explicit views in a consistent way. + /// + /// We only feed leaves which have prospective parachains enabled to this view. implicit_view: ImplicitView, + /// State tracked for all active leaves, whether or not they have prospective parachains + /// enabled. + per_leaf: HashMap, /// State tracked for all relay-parents backing work is ongoing for. This includes /// all active leaves. + /// + /// relay-parents fall into one of 3 categories. + /// 1. active leaves which do support prospective parachains + /// 2. active leaves which do not support prospective parachains + /// 3. relay-chain blocks which are ancestors of an active leaf and do support prospective + /// parachains. + /// + /// Relay-chain blocks which don't support prospective parachains are + /// never included in the fragment chains of active leaves which do. 
+ /// + /// While it would be technically possible to support such leaves in + /// fragment chains, it only benefits the transition period when asynchronous + /// backing is being enabled and complicates code. per_relay_parent: HashMap, /// State tracked for all candidates relevant to the implicit view. /// /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, - /// A local cache for storing per-session data. This cache helps to - /// reduce repeated calls to the runtime and avoid redundant computations. - per_session_cache: PerSessionCache, + /// Cache the per-session Validator->Group mapping. + validator_to_group_cache: + LruMap>>>, /// A clonable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -455,9 +339,10 @@ impl State { ) -> Self { State { implicit_view: ImplicitView::default(), + per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), - per_session_cache: PerSessionCache::default(), + validator_to_group_cache: LruMap::new(ByLength::new(2)), background_validation_tx, keystore, } @@ -785,8 +670,7 @@ struct BackgroundValidationParams { tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, candidate: CandidateReceipt, relay_parent: Hash, - node_features: NodeFeatures, - executor_params: Arc, + session_index: SessionIndex, persisted_validation_data: PersistedValidationData, pov: PoVData, n_validators: usize, @@ -805,8 +689,7 @@ async fn validate_and_make_available( mut tx_command, candidate, relay_parent, - node_features, - executor_params, + session_index, persisted_validation_data, pov, n_validators, @@ -831,6 +714,15 @@ async fn validate_and_make_available( } }; + let executor_params = match 
executor_params_at_relay_parent(relay_parent, &mut sender).await { + Ok(ep) => ep, + Err(e) => return Err(Error::UtilError(e)), + }; + + let node_features = request_node_features(relay_parent, session_index, &mut sender) + .await? + .unwrap_or(NodeFeatures::EMPTY); + let pov = match pov { PoVData::Ready(pov) => pov, PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => @@ -866,7 +758,7 @@ async fn validate_and_make_available( validation_code, candidate.clone(), pov.clone(), - executor_params.as_ref().clone(), + executor_params, ) .await? }; @@ -962,42 +854,87 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, state: &mut State, ) -> Result<(), Error> { + enum LeafHasProspectiveParachains { + Enabled(Result), + Disabled, + } + // Activate in implicit view before deactivate, per the docs // on ImplicitView, this is more efficient. let res = if let Some(leaf) = update.activated { + // Only activate in implicit view if prospective + // parachains are enabled. + let mode = prospective_parachains_mode(ctx.sender(), leaf.hash).await?; + let leaf_hash = leaf.hash; - Some((leaf, state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await.map(|_| ()))) + Some(( + leaf, + match mode { + ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, + ProspectiveParachainsMode::Enabled { .. } => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await.map(|_| mode), + ), + }, + )) } else { None }; for deactivated in update.deactivated { + state.per_leaf.remove(&deactivated); state.implicit_view.deactivate_leaf(deactivated); } // clean up `per_relay_parent` according to ancestry // of leaves. we do this so we can clean up candidates right after // as a result. + // + // when prospective parachains are disabled, the implicit view is empty, + // which means we'll clean up everything that's not a leaf - the expected behavior + // for pre-asynchronous backing. 
{ - let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); + let remaining: HashSet<_> = state + .per_leaf + .keys() + .chain(state.implicit_view.all_allowed_relay_parents()) + .collect(); state.per_relay_parent.retain(|r, _| remaining.contains(&r)); } // clean up `per_candidate` according to which relay-parents // are known. + // + // when prospective parachains are disabled, we clean up all candidates + // because we've cleaned up all relay parents. this is correct. state .per_candidate .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); // Get relay parents which might be fresh but might be known already // that are explicit or implicit from the new active leaf. - let fresh_relay_parents = match res { + let (fresh_relay_parents, leaf_mode) = match res { None => return Ok(()), - Some((leaf, Ok(_))) => { + Some((leaf, LeafHasProspectiveParachains::Disabled)) => { + // defensive in this case - for enabled, this manifests as an error. + if state.per_leaf.contains_key(&leaf.hash) { + return Ok(()) + } + + state + .per_leaf + .insert(leaf.hash, ActiveLeafState::new(ProspectiveParachainsMode::Disabled)); + + (vec![leaf.hash], ProspectiveParachainsMode::Disabled) + }, + Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(prospective_parachains_mode)))) => { let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); + let active_leaf_state = ActiveLeafState::new(prospective_parachains_mode); + + state.per_leaf.insert(leaf.hash, active_leaf_state); + let fresh_relay_parent = match fresh_relay_parents { Some(f) => f.to_vec(), None => { @@ -1010,9 +947,9 @@ async fn handle_active_leaves_update( vec![leaf.hash] }, }; - fresh_relay_parent + (fresh_relay_parent, prospective_parachains_mode) }, - Some((leaf, Err(e))) => { + Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { gum::debug!( target: LOG_TARGET, leaf_hash = ?leaf.hash, @@ -1030,13 +967,26 @@ async fn 
handle_active_leaves_update( continue } + let mode = match state.per_leaf.get(&maybe_new) { + None => { + // If the relay-parent isn't a leaf itself, + // then it is guaranteed by the prospective parachains + // subsystem that it is an ancestor of a leaf which + // has prospective parachains enabled and that the + // block itself did. + leaf_mode + }, + Some(l) => l.into(), + }; + // construct a `PerRelayParent` from the runtime API // and insert it. let per = construct_per_relay_parent_state( ctx, maybe_new, &state.keystore, - &mut state.per_session_cache, + &mut state.validator_to_group_cache, + mode, ) .await?; @@ -1135,47 +1085,52 @@ async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &KeystorePtr, - per_session_cache: &mut PerSessionCache, + validator_to_group_cache: &mut LruMap< + SessionIndex, + Arc>>, + >, + mode: ProspectiveParachainsMode, ) -> Result, Error> { let parent = relay_parent; - let (session_index, groups, claim_queue, disabled_validators) = futures::try_join!( + let (session_index, validators, groups, cores) = futures::try_join!( request_session_index_for_child(parent, ctx.sender()).await, + request_validators(parent, ctx.sender()).await, request_validator_groups(parent, ctx.sender()).await, - request_claim_queue(parent, ctx.sender()).await, - request_disabled_validators(parent, ctx.sender()).await, + request_from_runtime(parent, ctx.sender(), |tx| { + RuntimeApiRequest::AvailabilityCores(tx) + },) + .await, ) .map_err(Error::JoinMultiple)?; let session_index = try_runtime_api!(session_index); - let validators = per_session_cache.validators(session_index, parent, ctx.sender()).await; - let validators = try_runtime_api!(validators); - - let node_features = per_session_cache - .node_features(session_index, parent, ctx.sender()) + let inject_core_index = request_node_features(parent, session_index, ctx.sender()) .await? 
- .unwrap_or(NodeFeatures::EMPTY); - - let inject_core_index = node_features + .unwrap_or(NodeFeatures::EMPTY) .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); - let executor_params = - per_session_cache.executor_params(session_index, parent, ctx.sender()).await; - let executor_params = try_runtime_api!(executor_params); - gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let cores = try_runtime_api!(cores); + let minimum_backing_votes = + try_runtime_api!(request_min_backing_votes(parent, session_index, ctx.sender()).await); - let minimum_backing_votes = per_session_cache - .minimum_backing_votes(session_index, parent, ctx.sender()) - .await; - let minimum_backing_votes = try_runtime_api!(minimum_backing_votes); - let claim_queue = try_runtime_api!(claim_queue); - let disabled_validators = try_runtime_api!(disabled_validators); + // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 + // Once runtime ver `DISABLED_VALIDATORS_RUNTIME_REQUIREMENT` is released remove this call to + // `get_disabled_validators_with_fallback`, add `request_disabled_validators` call to the + // `try_join!` above and use `try_runtime_api!` to get `disabled_validators` + let disabled_validators = + get_disabled_validators_with_fallback(ctx.sender(), parent).await.map_err(|e| { + Error::UtilError(TryFrom::try_from(e).expect("the conversion is infallible; qed")) + })?; + + let maybe_claim_queue = try_runtime_api!(fetch_claim_queue(ctx.sender(), parent).await); let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct( @@ -1197,15 +1152,33 @@ async fn construct_per_relay_parent_state( }, }; - let n_cores = validator_groups.len(); + let n_cores = cores.len(); let mut groups = HashMap::>::new(); let mut assigned_core = None; - for idx in 
0..n_cores { + let has_claim_queue = maybe_claim_queue.is_some(); + let mut claim_queue = maybe_claim_queue.unwrap_or_default().0; + + for (idx, core) in cores.iter().enumerate() { let core_index = CoreIndex(idx as _); - if !claim_queue.contains_key(&core_index) { + if !has_claim_queue { + match core { + CoreState::Scheduled(scheduled) => + claim_queue.insert(core_index, [scheduled.para_id].into_iter().collect()), + CoreState::Occupied(occupied) if mode.is_enabled() => { + // Async backing makes it legal to build on top of + // occupied core. + if let Some(next) = &occupied.next_up_on_available { + claim_queue.insert(core_index, [next.para_id].into_iter().collect()) + } else { + continue + } + }, + _ => continue, + }; + } else if !claim_queue.contains_key(&core_index) { continue } @@ -1219,28 +1192,44 @@ async fn construct_per_relay_parent_state( } gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); - let validator_to_group = - per_session_cache.validator_to_group(session_index, &validators, &validator_groups); + let validator_to_group = validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; - let table_context = - TableContext { validator, groups, validators: validators.to_vec(), disabled_validators }; + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + Arc::new(IndexedVec::<_, _>::from(vector)) + }) + .expect("Just inserted"); + + let table_context = TableContext { validator, groups, validators, disabled_validators }; + let table_config = TableConfig { + allow_multiple_seconded: match mode { + ProspectiveParachainsMode::Enabled { .. 
} => true, + ProspectiveParachainsMode::Disabled => false, + }, + }; Ok(Some(PerRelayParentState { + prospective_parachains_mode: mode, parent, - node_features, - executor_params, + session_index, assigned_core, backed: HashSet::new(), - table: Table::new(), + table: Table::new(table_config), table_context, issued_statements: HashSet::new(), awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - n_cores: validator_groups.len() as u32, + n_cores: cores.len() as u32, claim_queue: ClaimQueueSnapshot::from(claim_queue), - validator_to_group, + validator_to_group: validator_to_group.clone(), group_rotation_info, })) } @@ -1256,6 +1245,7 @@ enum SecondingAllowed { #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn seconding_sanity_check( ctx: &mut Context, + active_leaves: &HashMap, implicit_view: &ImplicitView, hypothetical_candidate: HypotheticalCandidate, ) -> SecondingAllowed { @@ -1266,36 +1256,49 @@ async fn seconding_sanity_check( let candidate_relay_parent = hypothetical_candidate.relay_parent(); let candidate_hash = hypothetical_candidate.candidate_hash(); - for head in implicit_view.leaves() { - // Check that the candidate relay parent is allowed for para, skip the - // leaf otherwise. - let allowed_parents_for_para = - implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); - if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { - continue - } + for (head, leaf_state) in active_leaves { + if ProspectiveParachainsMode::from(leaf_state).is_enabled() { + // Check that the candidate relay parent is allowed for para, skip the + // leaf otherwise. 
+ let allowed_parents_for_para = + implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); + if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { + continue + } - let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(*head), - }, - tx, - )) - .await; - let response = rx.map_ok(move |candidate_memberships| { - let is_member_or_potential = candidate_memberships - .into_iter() - .find_map(|(candidate, leaves)| { - (candidate.candidate_hash() == candidate_hash).then_some(leaves) - }) - .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) - .is_some(); - - (is_member_or_potential, head) - }); - responses.push_back(response.boxed()); + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(*head), + }, + tx, + )) + .await; + let response = rx.map_ok(move |candidate_memberships| { + let is_member_or_potential = candidate_memberships + .into_iter() + .find_map(|(candidate, leaves)| { + (candidate.candidate_hash() == candidate_hash).then_some(leaves) + }) + .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) + .is_some(); + + (is_member_or_potential, head) + }); + responses.push_back(response.boxed()); + } else { + if *head == candidate_relay_parent { + if let ActiveLeafState::ProspectiveParachainsDisabled { seconded } = leaf_state { + if seconded.contains(&candidate_para) { + // The leaf is already occupied. For non-prospective parachains, we only + // second one candidate. 
+ return SecondingAllowed::No + } + } + responses.push_back(futures::future::ok((true, head)).boxed()); + } + } } if responses.is_empty() { @@ -1344,7 +1347,11 @@ async fn handle_can_second_request( tx: oneshot::Sender, ) { let relay_parent = request.candidate_relay_parent; - let response = if state.per_relay_parent.get(&relay_parent).is_some() { + let response = if state + .per_relay_parent + .get(&relay_parent) + .map_or(false, |pr_state| pr_state.prospective_parachains_mode.is_enabled()) + { let hypothetical_candidate = HypotheticalCandidate::Incomplete { candidate_hash: request.candidate_hash, candidate_para: request.candidate_para_id, @@ -1352,8 +1359,13 @@ async fn handle_can_second_request( candidate_relay_parent: relay_parent, }; - let result = - seconding_sanity_check(ctx, &state.implicit_view, hypothetical_candidate).await; + let result = seconding_sanity_check( + ctx, + &state.per_leaf, + &state.implicit_view, + hypothetical_candidate, + ) + .await; match result { SecondingAllowed::No => false, @@ -1406,14 +1418,16 @@ async fn handle_validated_candidate_command( // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. - if let SecondingAllowed::No = seconding_sanity_check( + let hypothetical_membership = match seconding_sanity_check( ctx, + &state.per_leaf, &state.implicit_view, hypothetical_candidate, ) .await { - return Ok(()) + SecondingAllowed::No => return Ok(()), + SecondingAllowed::Yes(membership) => membership, }; let statement = @@ -1463,6 +1477,24 @@ async fn handle_validated_candidate_command( Some(p) => p.seconded_locally = true, } + // record seconded candidates for non-prospective-parachains mode. + for leaf in hypothetical_membership { + let leaf_data = match state.per_leaf.get_mut(&leaf) { + None => { + gum::warn!( + target: LOG_TARGET, + leaf_hash = ?leaf, + "Missing `per_leaf` for known active leaf." 
+ ); + + continue + }, + Some(d) => d, + }; + + leaf_data.add_seconded_candidate(candidate.descriptor().para_id()); + } + rp_state.issued_statements.insert(candidate_hash); metrics.on_candidate_seconded(); @@ -1566,11 +1598,13 @@ fn sign_statement( /// Import a statement into the statement table and return the summary of the import. /// -/// This will fail with `Error::RejectedByProspectiveParachains` if the message type is seconded, -/// the candidate is fresh, and any of the following are true: +/// This will fail with `Error::RejectedByProspectiveParachains` if the message type +/// is seconded, the candidate is fresh, +/// and any of the following are true: /// 1. There is no `PersistedValidationData` attached. -/// 2. Prospective parachains subsystem returned an empty `HypotheticalMembership` i.e. did not -/// recognize the candidate as being applicable to any of the active leaves. +/// 2. Prospective parachains are enabled for the relay parent and the prospective parachains +/// subsystem returned an empty `HypotheticalMembership` i.e. did not recognize the candidate as +/// being applicable to any of the active leaves. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn import_statement( ctx: &mut Context, @@ -1591,7 +1625,8 @@ async fn import_statement( // If this is a new candidate (statement is 'seconded' and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. // - // We also need to inform the prospective parachains subsystem of the seconded candidate. + // If the relay parent supports prospective parachains, we also need + // to inform the prospective parachains subsystem of the seconded candidate. // If `ProspectiveParachainsMessage::Second` fails, then we return // Error::RejectedByProspectiveParachains. // @@ -1602,28 +1637,30 @@ async fn import_statement( // our active leaves. 
if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() { if !per_candidate.contains_key(&candidate_hash) { - let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( - IntroduceSecondedCandidateRequest { - candidate_para: candidate.descriptor.para_id(), - candidate_receipt: candidate.clone(), - persisted_validation_data: pvd.clone(), - }, - tx, - )) - .await; + if rp_state.prospective_parachains_mode.is_enabled() { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( + IntroduceSecondedCandidateRequest { + candidate_para: candidate.descriptor.para_id(), + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + }, + tx, + )) + .await; - match rx.await { - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Could not reach the Prospective Parachains subsystem." - ); + match rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Could not reach the Prospective Parachains subsystem." + ); - return Err(Error::RejectedByProspectiveParachains) - }, - Ok(false) => return Err(Error::RejectedByProspectiveParachains), - Ok(true) => {}, + return Err(Error::RejectedByProspectiveParachains) + }, + Ok(false) => return Err(Error::RejectedByProspectiveParachains), + Ok(true) => {}, + } } // Only save the candidate if it was approved by prospective parachains. @@ -1686,15 +1723,28 @@ async fn post_import_statement_actions( "Candidate backed", ); - // Inform the prospective parachains subsystem - // that the candidate is now backed. - ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( - para_id, - candidate_hash, - )) - .await; - // Notify statement distribution of backed candidate. 
- ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; + if rp_state.prospective_parachains_mode.is_enabled() { + // Inform the prospective parachains subsystem + // that the candidate is now backed. + ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( + para_id, + candidate_hash, + )) + .await; + // Notify statement distribution of backed candidate. + ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; + } else { + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. + // + // Backed candidates are bounded by the number of validators, + // parachains, and the block production rate of the relay chain. + let message = ProvisionerMessage::ProvisionableData( + rp_state.parent, + ProvisionableData::BackedCandidate(backed.receipt()), + ); + ctx.send_unbounded_message(message); + } } else { gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate"); } @@ -1845,8 +1895,7 @@ async fn kick_off_validation_work( tx_command: background_validation_tx.clone(), candidate: attesting.candidate, relay_parent: rp_state.parent, - node_features: rp_state.node_features.clone(), - executor_params: Arc::clone(&rp_state.executor_params), + session_index: rp_state.session_index, persisted_validation_data, pov, n_validators: rp_state.table_context.validators.len(), @@ -2000,8 +2049,7 @@ async fn validate_and_second( tx_command: background_validation_tx.clone(), candidate: candidate.clone(), relay_parent: rp_state.parent, - node_features: rp_state.node_features.clone(), - executor_params: Arc::clone(&rp_state.executor_params), + session_index: rp_state.session_index, persisted_validation_data, pov: PoVData::Ready(pov), n_validators: rp_state.table_context.validators.len(), diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1a5fbeda100c..97e25c04282c 100644 --- 
a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -14,23 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use self::test_helpers::mock::new_leaf; use super::*; use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ + errors::RuntimeApiError, messages::{ - AllMessages, ChainApiMessage, CollatorProtocolMessage, HypotheticalMembership, PvfExecKind, - RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, + AllMessages, CollatorProtocolMessage, PvfExecKind, RuntimeApiMessage, RuntimeApiRequest, + ValidationFailed, }, - ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, TimeoutExt, + ActiveLeavesUpdate, FromOrchestra, OverseerSignal, TimeoutExt, }; -use polkadot_node_subsystem_test_helpers::mock::new_leaf; +use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - node_features, - vstaging::{CoreState, MutateDescriptorV2, OccupiedCore}, - BlockNumber, CandidateDescriptor, GroupRotationInfo, HeadData, Header, PersistedValidationData, - ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + node_features, vstaging::MutateDescriptorV2, CandidateDescriptor, GroupRotationInfo, HeadData, + PersistedValidationData, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use polkadot_primitives_test_helpers::{ dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, @@ -47,10 +47,10 @@ use std::{ time::Duration, }; -struct TestLeaf { - activated: ActivatedLeaf, - min_relay_parents: Vec<(ParaId, u32)>, -} +mod prospective_parachains; + +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; fn table_statement_to_primitive(statement: TableStatement) -> Statement { match statement { @@ -69,14 +69,6 
@@ fn dummy_pvd() -> PersistedValidationData { } } -#[derive(Default)] -struct PerSessionCacheState { - has_cached_validators: bool, - has_cached_node_features: bool, - has_cached_executor_params: bool, - has_cached_minimum_backing_votes: bool, -} - pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, @@ -93,7 +85,6 @@ pub(crate) struct TestState { minimum_backing_votes: u32, disabled_validators: Vec, node_features: NodeFeatures, - per_session_cache_state: PerSessionCacheState, } impl TestState { @@ -166,7 +157,6 @@ impl Default for TestState { chain_ids, keystore, validators, - per_session_cache_state: PerSessionCacheState::default(), validator_public, validator_groups: (validator_groups, group_rotation_info), validator_to_group, @@ -190,8 +180,6 @@ fn test_harness>( keystore: KeystorePtr, test: impl FnOnce(VirtualOverseer) -> T, ) { - sp_tracing::init_for_tests(); - let pool = sp_core::testing::TaskExecutor::new(); let (context, virtual_overseer) = @@ -262,349 +250,176 @@ impl TestCandidateBuilder { } } -async fn assert_validation_request( - virtual_overseer: &mut VirtualOverseer, - validation_code: ValidationCode, -) { +// Tests that the subsystem performs actions that are required on startup. +async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) { + // Start work on some new parent. 
+ virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + new_leaf(test_state.relay_parent, 1), + )))) + .await; + assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) - ) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code))).unwrap(); + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } ); -} -async fn assert_validate_from_exhaustive( - virtual_overseer: &mut VirtualOverseer, - assert_pvd: &PersistedValidationData, - assert_pov: &PoV, - assert_validation_code: &ValidationCode, - assert_candidate: &CommittedCandidateReceipt, - expected_head_data: &HeadData, - result_validation_data: PersistedValidationData, -) { + // Check that subsystem job issues a request for the session index for child. assert_matches!( virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - pov, - validation_data, - validation_code, - candidate_receipt, - exec_kind, - response_sender, - .. 
- }, - ) if validation_data == *assert_pvd && - validation_code == *assert_validation_code && - *pov == *assert_pov && candidate_receipt.descriptor == assert_candidate.descriptor && - matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && - candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => - { - response_sender.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: expected_head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - result_validation_data, - ))) - .unwrap(); + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); } ); -} -// Activates the initial leaf and returns the `ParaId` used. This function is a prerequisite for all -// tests. -async fn activate_initial_leaf( - virtual_overseer: &mut VirtualOverseer, - test_state: &mut TestState, -) -> ParaId { - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let activated = new_leaf(test_state.relay_parent, LEAF_A_BLOCK_NUMBER - 1); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(virtual_overseer, test_leaf_a, test_state).await; - para_id -} + // Check that subsystem job issues a request for a validator set. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); -async fn assert_candidate_is_shared_and_seconded( - virtual_overseer: &mut VirtualOverseer, - relay_parent: &Hash, -) { + // Check that subsystem job issues a request for the validator groups. assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == *relay_parent => {} + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } ); + // Check that subsystem job issues a request for the availability cores. assert_matches!( virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(*relay_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); } ); -} -async fn assert_candidate_is_shared_and_backed( - virtual_overseer: &mut VirtualOverseer, - relay_parent: &Hash, - expected_para_id: &ParaId, - expected_candidate_hash: &CandidateHash, -) { + // Node features request from runtime: all features are disabled. 
assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) ) => { - assert_eq!(*relay_parent, hash); + tx.send(Ok(test_state.node_features.clone())).unwrap(); } ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, candidate_hash - ), - ) if *expected_candidate_hash == candidate_hash && candidate_para_id == *expected_para_id + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::MinimumBackingVotes(session_index, tx), + )) if parent == test_state.relay_parent && session_index == test_state.signing_context.session_index => { + tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); + } ); + // Check that subsystem job issues a request for the runtime version. assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( - candidate_hash - )) if *expected_candidate_hash == candidate_hash + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap(); + } ); -} - -fn get_parent_hash(hash: Hash) -> Hash { - Hash::from_low_u64_be(hash.to_low_u64_be() + 1) -} - -async fn activate_leaf( - virtual_overseer: &mut VirtualOverseer, - leaf: TestLeaf, - test_state: &mut TestState, -) { - let TestLeaf { activated, min_relay_parents } = leaf; - let leaf_hash = activated.hash; - let leaf_number = activated.number; - // Start work on some new parent. 
- virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - activated, - )))) - .await; - - let min_min = *min_relay_parents - .iter() - .map(|(_, block_num)| block_num) - .min() - .unwrap_or(&leaf_number); - - let ancestry_len = leaf_number + 1 - min_min; - - let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) - .take(ancestry_len as usize); - let ancestry_numbers = (min_min..=leaf_number).rev(); - let ancestry_iter = ancestry_hashes.zip(ancestry_numbers).peekable(); - - let mut next_overseer_message = None; - // How many blocks were actually requested. - let mut requested_len = 0; - { - let mut ancestry_iter = ancestry_iter.clone(); - while let Some((hash, number)) = ancestry_iter.next() { - // May be `None` for the last element. - let parent_hash = - ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); - - let msg = virtual_overseer.recv().await; - // It may happen that some blocks were cached by implicit view, - // reuse the message. - if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { - next_overseer_message.replace(msg); - break - } - - assert_matches!( - msg, - AllMessages::ChainApi( - ChainApiMessage::BlockHeader(_hash, tx) - ) if _hash == hash => { - let header = Header { - parent_hash, - number, - state_root: Hash::zero(), - extrinsics_root: Hash::zero(), - digest: Default::default(), - }; - - tx.send(Ok(Some(header))).unwrap(); - } - ); - - if requested_len == 0 { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == leaf_hash => { - tx.send(min_relay_parents.clone()).unwrap(); - } - ); - } - requested_len += 1; + // Check that subsystem job issues a request for the disabled validators. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); } - } - - for (hash, number) in ancestry_iter.take(requested_len) { - let msg = match next_overseer_message.take() { - Some(msg) => msg, - None => virtual_overseer.recv().await, - }; - - // Check that subsystem job issues a request for the session index for child. - assert_matches!( - msg, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.signing_context.session_index)).unwrap(); - } - ); - - // Check that subsystem job issues a request for the validator groups. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) if parent == hash => { - let (validator_groups, mut group_rotation_info) = test_state.validator_groups.clone(); - group_rotation_info.now = number; - tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); - } - ); + ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) - ) if parent == hash => { - tx.send(Ok( - test_state.claim_queue.clone() - )).unwrap(); - } - ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); - // Check that the subsystem job issues a request for the disabled validators. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); - } - ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); +} - if !test_state.per_session_cache_state.has_cached_validators { - // Check that subsystem job issues a request for a validator set. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.validator_public.clone())).unwrap(); - } - ); - test_state.per_session_cache_state.has_cached_validators = true; +async fn assert_validation_requests( + virtual_overseer: &mut VirtualOverseer, + validation_code: ValidationCode, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code))).unwrap(); } + ); - if !test_state.per_session_cache_state.has_cached_node_features { - // Node features request from runtime: all features are disabled. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) - ) if parent == hash => { - tx.send(Ok(test_state.node_features.clone())).unwrap(); - } - ); - test_state.per_session_cache_state.has_cached_node_features = true; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx)) + ) => { + tx.send(Ok(1u32.into())).unwrap(); } + ); - if !test_state.per_session_cache_state.has_cached_executor_params { - // Check if subsystem job issues a request for the executor parameters. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionExecutorParams(_session_index, tx)) - ) if parent == hash => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - } - ); - test_state.per_session_cache_state.has_cached_executor_params = true; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(sess_idx, tx)) + ) if sess_idx == 1 => { + tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } + ); - if !test_state.per_session_cache_state.has_cached_minimum_backing_votes { - // Check if subsystem job issues a request for the minimum backing votes. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::MinimumBackingVotes(session_index, tx), - )) if parent == hash && session_index == test_state.signing_context.session_index => { - tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); - } - ); - test_state.per_session_cache_state.has_cached_minimum_backing_votes = true; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(sess_idx, tx)) + ) if sess_idx == 1 => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); } - } + ); } -async fn assert_validate_seconded_candidate( +async fn assert_validate_from_exhaustive( virtual_overseer: &mut VirtualOverseer, - relay_parent: Hash, - candidate: &CommittedCandidateReceipt, - assert_pov: &PoV, assert_pvd: &PersistedValidationData, + assert_pov: &PoV, assert_validation_code: &ValidationCode, + assert_candidate: &CommittedCandidateReceipt, expected_head_data: &HeadData, - fetch_pov: bool, + result_validation_data: PersistedValidationData, ) { - assert_validation_request(virtual_overseer, assert_validation_code.clone()).await; - - if fetch_pov { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { - relay_parent: hash, - tx, - .. 
- } - ) if hash == relay_parent => { - tx.send(assert_pov.clone()).unwrap(); - } - ); - } - assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( @@ -619,9 +434,9 @@ async fn assert_validate_seconded_candidate( }, ) if validation_data == *assert_pvd && validation_code == *assert_validation_code && - *pov == *assert_pov && candidate_receipt.descriptor == candidate.descriptor && + *pov == *assert_pov && candidate_receipt.descriptor == assert_candidate.descriptor && matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && - candidate_receipt.commitments_hash == candidate.commitments.hash() => + candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => { response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { @@ -632,79 +447,30 @@ async fn assert_validate_seconded_candidate( processed_downward_messages: 0, hrmp_watermark: 0, }, - assert_pvd.clone(), + result_validation_data, ))) .unwrap(); } ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); -} - -pub(crate) async fn assert_hypothetical_membership_requests( - virtual_overseer: &mut VirtualOverseer, - mut expected_requests: Vec<( - HypotheticalMembershipRequest, - Vec<(HypotheticalCandidate, HypotheticalMembership)>, - )>, -) { - // Requests come with no particular order. 
- let requests_num = expected_requests.len(); - - for _ in 0..requests_num { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), - ) => { - let idx = match expected_requests.iter().position(|r| r.0 == request) { - Some(idx) => idx, - None => - panic!( - "unexpected hypothetical membership request, no match found for {:?}", - request - ), - }; - let resp = std::mem::take(&mut expected_requests[idx].1); - tx.send(resp).unwrap(); - - expected_requests.remove(idx); - } - ); - } } -pub(crate) fn make_hypothetical_membership_response( - hypothetical_candidate: HypotheticalCandidate, - relay_parent_hash: Hash, -) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { - vec![(hypothetical_candidate, vec![relay_parent_hash])] -} - -// Test that a `CandidateBackingMessage::Second` issues validation work and in case validation is -// successful issues correct messages. +// Test that a `CandidateBackingMessage::Second` issues validation work +// and in case validation is successful issues a `StatementDistributionMessage`. 
#[test] fn backing_second_works() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); let validation_code = ValidationCode(vec![1, 2, 3]); - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); let pov_hash = pov.hash(); let candidate = TestCandidateBuilder { - para_id, + para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), @@ -723,52 +489,45 @@ fn backing_second_works() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validate_seconded_candidate( + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + + assert_validate_from_exhaustive( &mut virtual_overseer, - test_state.relay_parent, - &candidate, - &pov, &pvd, + &pov, &validation_code, + &candidate, expected_head_data, - false, - ) - .await; - - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(test_state.relay_parent), - }; - let expected_response = - make_hypothetical_membership_response(hypothetical_candidate, test_state.relay_parent); - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![(expected_request, expected_response)], + test_state.validation_data.clone(), ) .await; assert_matches!( virtual_overseer.recv().await, - 
AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); } ); - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &test_state.relay_parent) - .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == test_state.relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(test_state.relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( @@ -795,7 +554,7 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { } test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_ab = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd_ab = dummy_pvd(); @@ -803,10 +562,10 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { let pov_hash = pov_ab.hash(); - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); let candidate_a = TestCandidateBuilder { - para_id, + para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), @@ -817,6 +576,7 @@ fn backing_works(#[case] 
elastic_scaling_mvp: bool) { .build(); let candidate_a_hash = candidate_a.hash(); + let candidate_a_commitments_hash = candidate_a.commitments.hash(); let public1 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -858,40 +618,85 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; + + // Sending a `Statement::Seconded` for our assignment will start + // validation process. The first thing requested is the PoV. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent, tx, - ), - ) if - req.candidate_receipt == candidate_a - && req.candidate_para == para_id - && pvd_ab == req.persisted_validation_data => { - tx.send(true).unwrap(); + .. + } + ) if relay_parent == test_state.relay_parent => { + tx.send(pov_ab.clone()).unwrap(); } ); - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate_a.descriptor.relay_parent(), - &candidate_a, - &pov_ab, - &pvd_ab, - &validation_code_ab, - expected_head_data, - true, - ) - .await; + // The next step is the actual request to Validation subsystem + // to validate the `Seconded` candidate. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, + candidate_receipt, + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_ab && + validation_code == validation_code_ab && + *pov == pov_ab && candidate_receipt.descriptor == candidate_a.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_receipt.commitments_hash == candidate_a_commitments_hash => + { + response_sender.send(Ok( + ValidationResult::Valid(CandidateCommitments { + head_data: expected_head_data.clone(), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, test_state.validation_data.clone()), + )).unwrap(); + } + ); - assert_candidate_is_shared_and_backed( - &mut virtual_overseer, - &test_state.relay_parent, - ¶_id, - &candidate_a_hash, - ) - .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate_a.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(test_state.relay_parent, hash); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate_a.to_plain()); + } + ); let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); @@ -967,7 +772,7 @@ fn get_backed_candidate_preserves_order() { .insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect()); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; let 
pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) }; @@ -1076,37 +881,17 @@ fn get_backed_candidate_preserves_order() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - // Prospective parachains are notified about candidate seconded first. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == candidate.descriptor.para_id() - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate.to_plain()); } ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, candidate_hash - ), - ) if candidate.hash() == candidate_hash && candidate_para_id == candidate.descriptor.para_id() - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( - candidate_hash - )) if candidate.hash() == candidate_hash - ); } // Happy case, all candidates should be present. 
@@ -1386,9 +1171,9 @@ fn extract_core_index_from_statement_works() { #[test] fn backing_works_while_validation_ongoing() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_abc = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd_abc = dummy_pvd(); @@ -1468,22 +1253,7 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate_a - && req.candidate_para == para_id - && pvd_abc == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_validation_request(&mut virtual_overseer, validation_code_abc.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the @@ -1535,11 +1305,15 @@ fn backing_works_while_validation_ongoing() { // Candidate gets backed entirely by other votes. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, candidate_hash - ), - ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(CandidateReceipt { + descriptor, + .. 
+ }) + ) + ) if descriptor == candidate_a.descriptor ); let statement = @@ -1588,12 +1362,13 @@ fn backing_works_while_validation_ongoing() { }); } -// Issuing conflicting statements on the same candidate should be a misbehavior. +// Issuing conflicting statements on the same candidate should +// be a misbehavior. #[test] fn backing_misbehavior_works() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; @@ -1615,6 +1390,8 @@ fn backing_misbehavior_works() { .build(); let candidate_a_hash = candidate_a.hash(); + let candidate_a_commitments_hash = candidate_a.commitments.hash(); + let public2 = Keystore::sr25519_generate_new( &*test_state.keystore, ValidatorId::ID, @@ -1648,41 +1425,85 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - // Prospective parachains are notified about candidate seconded first. + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent, tx, - ), - ) if - req.candidate_receipt == candidate_a - && req.candidate_para == para_id - && pvd_a == req.persisted_validation_data => { - tx.send(true).unwrap(); + .. 
+ } + ) if relay_parent == test_state.relay_parent => { + tx.send(pov_a.clone()).unwrap(); } ); - assert_validate_seconded_candidate( - &mut virtual_overseer, - test_state.relay_parent, - &candidate_a, - &pov_a, - &pvd_a, - &validation_code_a, - expected_head_data, - true, - ) - .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, + candidate_receipt, + pov, + exec_kind, + response_sender, + .. + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && candidate_receipt.descriptor == candidate_a.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { + response_sender.send(Ok( + ValidationResult::Valid(CandidateCommitments { + head_data: expected_head_data.clone(), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, test_state.validation_data.clone()), + )).unwrap(); + } + ); - assert_candidate_is_shared_and_backed( - &mut virtual_overseer, - &test_state.relay_parent, - ¶_id, - &candidate_a_hash, - ) - .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. 
} + ) if candidate_hash == candidate_a.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + signed_statement, + ) + ) if relay_parent == test_state.relay_parent => { + assert_eq!(*signed_statement.payload(), StatementWithPVD::Valid(candidate_a_hash)); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(CandidateReceipt { + descriptor, + .. + }) + ) + ) if descriptor == candidate_a.descriptor + ); // This `Valid` statement is redundant after the `Seconded` statement already sent. let statement = @@ -1727,13 +1548,13 @@ fn backing_misbehavior_works() { }); } -// Test that if we are asked to second an invalid candidate we can still second a valid one -// afterwards. +// Test that if we are asked to second an invalid candidate we +// can still second a valid one afterwards. 
#[test] -fn backing_doesnt_second_invalid() { - let mut test_state = TestState::default(); +fn backing_dont_second_invalid() { + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -1784,7 +1605,7 @@ fn backing_doesnt_second_invalid() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -1824,18 +1645,38 @@ fn backing_doesnt_second_invalid() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_request(&mut virtual_overseer, validation_code_b.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_b.clone()).await; - assert_validate_from_exhaustive( - &mut virtual_overseer, - &pvd_b, - &pov_block_b, - &validation_code_b, - &candidate_b, - expected_head_data, - test_state.validation_data.clone(), - ) - .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, + candidate_receipt, + pov, + exec_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_b && + validation_code == validation_code_b && + *pov == pov_block_b && candidate_receipt.descriptor == candidate_b.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_b.commitments.hash() == candidate_receipt.commitments_hash => + { + response_sender.send(Ok( + ValidationResult::Valid(CandidateCommitments { + head_data: expected_head_data.clone(), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, pvd_b.clone()), + )).unwrap(); + } + ); assert_matches!( virtual_overseer.recv().await, @@ -1846,42 +1687,15 @@ fn backing_doesnt_second_invalid() { } ); - let hypothetical_candidate_b = HypotheticalCandidate::Complete { - candidate_hash: candidate_b.hash(), - receipt: Arc::new(candidate_b.clone()), - persisted_validation_data: pvd_a.clone(), // ??? - }; - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate_b.clone()], - fragment_chain_relay_parent: Some(test_state.relay_parent), - }; - let expected_response_b = make_hypothetical_membership_response( - hypothetical_candidate_b.clone(), - test_state.relay_parent, - ); - - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - // (expected_request_a, expected_response_a), - (expected_request_b, expected_response_b), - ], - ) - .await; - - // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) => { - assert_eq!(req.candidate_receipt, candidate_b); - assert_eq!(req.candidate_para, para_id); - assert_eq!(pvd_a, req.persisted_validation_data); // ??? 
- tx.send(true).unwrap(); + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) + ) if parent_hash == test_state.relay_parent => { + assert_eq!(*signed_statement.payload(), StatementWithPVD::Seconded(candidate_b, pvd_b.clone())); } ); @@ -1894,13 +1708,13 @@ fn backing_doesnt_second_invalid() { }); } -// Test that if we have already issued a statement (in this case `Invalid`) about a candidate we -// will not be issuing a `Seconded` statement on it. +// Test that if we have already issued a statement (in this case `Invalid`) about a +// candidate we will not be issuing a `Seconded` statement on it. #[test] fn backing_second_after_first_fails_works() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -1943,22 +1757,7 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd_a == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -2041,7 +1840,7 @@ fn backing_second_after_first_fails_works() { // triggered on the prev step. 
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_request(&mut virtual_overseer, validation_code_to_second.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_to_second.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -2055,13 +1854,13 @@ fn backing_second_after_first_fails_works() { }); } -// Test that if the validation of the candidate has failed this does not stop the work of this -// subsystem and so it is not fatal to the node. +// That that if the validation of the candidate has failed this does not stop +// the work of this subsystem and so it is not fatal to the node. #[test] fn backing_works_after_failed_validation() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -2102,22 +1901,7 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd_a == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -2252,16 +2036,16 @@ fn candidate_backing_reorders_votes() { // Test whether we retry on failed PoV fetching. 
#[test] fn retry_works() { - let mut test_state = TestState::default(); + // sp_tracing::try_init_simple(); + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); let validation_code_a = ValidationCode(vec![1, 2, 3]); let pov_hash = pov_a.hash(); - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], @@ -2270,7 +2054,7 @@ fn retry_works() { erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), persisted_validation_data_hash: pvd_a.hash(), validation_code: validation_code_a.0.clone(), - head_data: expected_head_data.clone(), + ..Default::default() } .build(); @@ -2328,22 +2112,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd_a == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. 
@@ -2365,31 +2134,43 @@ fn retry_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; // Not deterministic which message comes first: - for _ in 0u32..3 { + for _ in 0u32..6 { match virtual_overseer.recv().await { - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, - candidate_hash, - ), - ) if candidate_hash == candidate_hash && candidate_para_id == para_id => { - assert_eq!(candidate_para_id, para_id); - assert_eq!(candidate_hash, candidate.hash()); + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(CandidateReceipt { descriptor, .. }), + )) => { + assert_eq!(descriptor, candidate.descriptor); }, AllMessages::AvailabilityDistribution( AvailabilityDistributionMessage::FetchPoV { relay_parent, tx, .. }, ) if relay_parent == test_state.relay_parent => { std::mem::drop(tx); }, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed( - candidate_hash, - )) if candidate_hash == candidate.hash() => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(hash, tx), )) if hash == validation_code_a.hash() => { tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + tx.send(Ok(1u32.into())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(1, tx), + )) => { + tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(1, tx), + )) => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -2400,6 +2181,8 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); 
virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -2438,10 +2221,10 @@ fn retry_works() { #[test] fn observes_backing_even_if_not_validator() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); let empty_keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); test_harness(empty_keystore, |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd = dummy_pvd(); @@ -2522,22 +2305,6 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - // Prospective parachains are notified about candidate seconded first. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate_a - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); @@ -2545,11 +2312,14 @@ fn observes_backing_even_if_not_validator() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, candidate_hash - ), - ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate_a.to_plain()); + } ); let statement = @@ -2566,27 +2336,155 @@ fn observes_backing_even_if_not_validator() { }); } +// Tests that it's impossible to second multiple candidates per relay parent +// without prospective parachains. #[test] -fn new_leaf_view_doesnt_clobber_old() { - let mut test_state = TestState::default(); - let relay_parent_2 = Hash::repeat_byte(1); - assert_ne!(test_state.relay_parent, relay_parent_2); +fn cannot_second_multiple_candidates_per_parent() { + let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; - - // New leaf that doesn't clobber old. 
- { - let old_relay_parent = test_state.relay_parent; - test_state.relay_parent = relay_parent_2; + test_startup(&mut virtual_overseer, &test_state).await; - const LEAF_B_BLOCK_NUMBER: BlockNumber = 101; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - let activated = new_leaf(test_state.relay_parent, LEAF_B_BLOCK_NUMBER - 1); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let candidate_builder = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + }; + let candidate = candidate_builder.clone().build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + + assert_validate_from_exhaustive( + &mut virtual_overseer, + &pvd, + &pov, + &validation_code, + &candidate, + expected_head_data, + test_state.validation_data.clone(), + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. 
} + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == test_state.relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(test_state.relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + // Try to second candidate with the same relay parent again. + + // Make sure the candidate hash is different. + let validation_code = ValidationCode(vec![4, 5, 6]); + let mut candidate_builder = candidate_builder; + candidate_builder.validation_code = validation_code.0.clone(); + let candidate = candidate_builder.build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + // The validation is still requested. + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { response_sender, .. }, + ) => { + response_sender.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. 
} + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + // Validation done, but the candidate is rejected cause of 0-depth being already occupied. + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} + +#[test] +fn new_leaf_view_doesnt_clobber_old() { + let mut test_state = TestState::default(); + let relay_parent_2 = Hash::repeat_byte(1); + assert_ne!(test_state.relay_parent, relay_parent_2); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; + // New leaf that doesn't clobber old. + { + let old_relay_parent = test_state.relay_parent; + test_state.relay_parent = relay_parent_2; + test_startup(&mut virtual_overseer, &test_state).await; test_state.relay_parent = old_relay_parent; } @@ -2639,7 +2537,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_second() { test_state.disabled_validators.push(ValidatorIndex(0)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2687,7 +2585,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { test_state.disabled_validators.push(ValidatorIndex(0)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2729,21 +2627,6 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { 
virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2764,7 +2647,7 @@ fn validator_ignores_statements_from_disabled_validators() { test_state.disabled_validators.push(ValidatorIndex(2)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2783,6 +2666,7 @@ fn validator_ignores_statements_from_disabled_validators() { validation_code: validation_code.0.clone(), } .build(); + let candidate_commitments_hash = candidate.commitments.hash(); let public2 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -2834,1198 +2718,93 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - // Prospective parachains are notified about candidate seconded first. + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + + // Sending a `Statement::Seconded` for our assignment will start + // validation process. The first thing requested is the PoV. 
assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent, tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); + .. + } + ) if relay_parent == test_state.relay_parent => { + tx.send(pov.clone()).unwrap(); } ); - assert_validate_seconded_candidate( - &mut virtual_overseer, - test_state.relay_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - true, - ) - .await; - - assert_candidate_is_shared_and_backed( - &mut virtual_overseer, - &test_state.relay_parent, - ¶_id, - &candidate.hash(), - ) - .await; - - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(test_state.relay_parent), - ))) - .await; - virtual_overseer - }); -} - -// Test that `seconding_sanity_check` works when a candidate is allowed -// for all leaves. -#[test] -fn seconding_sanity_check_allowed_on_all() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - // `a` is grandparent of `b`. 
- let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let leaf_b_hash = Hash::from_low_u64_be(128); - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), + // The next step is the actual request to Validation subsystem + // to validate the `Seconded` candidate. 
+ let expected_pov = pov; + let expected_validation_code = validation_code; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, + candidate_receipt, + pov, + executor_params: _, + exec_kind, + response_sender, + } + ) if validation_data == pvd && + validation_code == expected_validation_code && + *pov == expected_pov && candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_commitments_hash == candidate_receipt.commitments_hash => + { + response_sender.send(Ok( + ValidationResult::Valid(CandidateCommitments { + head_data: expected_head_data.clone(), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, test_state.validation_data.clone()), + )).unwrap(); + } ); - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - let expected_response_b = - 
make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_response_a), - (expected_request_b, expected_response_b), - ], - ) - .await; - // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); } ); - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; - - virtual_overseer - }); -} - -// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed -// for all leaves. -#[test] -fn seconding_sanity_check_disallowed() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_b_hash = Hash::from_low_u64_be(128); - // `a` is grandparent of `b`. 
- let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap().clone(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(test_state.relay_parent, hash); + } + ); - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate.to_plain()); + } ); - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut 
virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - &expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![(expected_request_a, expected_response_a)], - ) - .await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; - - activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; - let leaf_a_grandparent = get_parent_hash(leaf_a_parent); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_grandparent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_grandparent, - &candidate, - &pov, - &pvd, - 
&validation_code, - &expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate), - persisted_validation_data: pvd, - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_empty_response.clone()), - (expected_request_b, expected_empty_response), - ], - ) - .await; - - assert!(virtual_overseer - .recv() - .timeout(std::time::Duration::from_millis(50)) - .await - .is_none()); - virtual_overseer - }); -} - -// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one -// leaf. -#[test] -fn seconding_sanity_check_allowed_on_at_least_one_leaf() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - // `a` is grandparent of `b`. 
- let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let leaf_b_hash = Hash::from_low_u64_be(128); - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), 
- }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_response_a), - (expected_request_b, expected_response_b), - ], - ) - .await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; - - virtual_overseer - }); -} - -// Test that a seconded candidate which is not approved by prospective parachains -// subsystem doesn't change the view. -#[test] -fn prospective_parachains_reject_candidate() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. 
- const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), - )]; - assert_hypothetical_membership_requests(&mut 
virtual_overseer, expected_request_a.clone()) - .await; - - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - // Reject it. - tx.send(false).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( - relay_parent, - candidate_receipt, - )) if candidate_receipt.descriptor() == &candidate.descriptor && - candidate_receipt.commitments_hash == candidate.commitments.hash() && - relay_parent == leaf_a_parent - ); - - // Try seconding the same candidate. - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; - - virtual_overseer - }); -} - -// Test that a validator can second multiple candidates per single relay parent. 
-#[test] -fn second_multiple_candidates_per_relay_parent() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a parent of the activated `leaf`. - const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - let leaf_grandparent = get_parent_hash(leaf_parent); - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate_a = TestCandidateBuilder { - para_id, - relay_parent: leaf_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - }; - let mut candidate_b = candidate_a.clone(); - candidate_b.relay_parent = leaf_grandparent; - - let candidate_a = candidate_a.build(); - let candidate_b = candidate_b.build(); - - for candidate in &[candidate_a, candidate_b] { - let second = CandidateBackingMessage::Second( - leaf_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate.descriptor.relay_parent(), - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // 
`seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), - )]; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - expected_request_a.clone(), - ) - .await; - - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded( - &mut virtual_overseer, - &candidate.descriptor.relay_parent(), - ) - .await; - } - - virtual_overseer - }); -} - -// Tests that validators start work on consecutive prospective parachain blocks. -#[test] -fn concurrent_dependent_candidates() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a grandparent of the activated `leaf`, - // candidate `b` -- in parent. 
- const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - let leaf_grandparent = get_parent_hash(leaf_parent); - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let head_data = &[ - HeadData(vec![10, 20, 30]), // Before `a`. - HeadData(vec![11, 21, 31]), // After `a`. - HeadData(vec![12, 22]), // After `b`. - ]; - - let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd_a = PersistedValidationData { - parent_head: head_data[0].clone(), - relay_parent_number: LEAF_BLOCK_NUMBER - 2, - relay_parent_storage_root: Hash::zero(), - max_pov_size: 1024, - }; - - let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; - let pvd_b = PersistedValidationData { - parent_head: head_data[1].clone(), - relay_parent_number: LEAF_BLOCK_NUMBER - 1, - relay_parent_storage_root: Hash::zero(), - max_pov_size: 1024, - }; - let validation_code = ValidationCode(vec![1, 2, 3]); - - let candidate_a = TestCandidateBuilder { - para_id, - relay_parent: leaf_grandparent, - pov_hash: pov_a.hash(), - head_data: head_data[1].clone(), - erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), - persisted_validation_data_hash: pvd_a.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - let candidate_b = TestCandidateBuilder { - para_id, - relay_parent: leaf_parent, - pov_hash: pov_b.hash(), - head_data: head_data[2].clone(), - erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), - persisted_validation_data_hash: pvd_b.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - let candidate_a_hash = 
candidate_a.hash(); - let candidate_b_hash = candidate_b.hash(); - - let public1 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[5].to_seed()), - ) - .expect("Insert key into keystore"); - let public2 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[2].to_seed()), - ) - .expect("Insert key into keystore"); - - // Signing context should have a parent hash candidate is based on. - let signing_context = - SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; - let signed_a = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), - &signing_context, - ValidatorIndex(2), - &public2.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let signing_context = - SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; - let signed_b = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), - &signing_context, - ValidatorIndex(5), - &public1.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); - let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); - - virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; - - // At this point the subsystem waits for response, the previous message is received, - // send a second one without blocking. 
- let _ = virtual_overseer - .tx - .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); - - let mut valid_statements = HashSet::new(); - let mut backed_statements = HashSet::new(); - - loop { - let msg = virtual_overseer - .recv() - .timeout(std::time::Duration::from_secs(1)) - .await - .expect("overseer recv timed out"); - - // Order is not guaranteed since we have 2 statements being handled concurrently. - match msg { - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), - ) => { - tx.send(true).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ValidationCodeByHash(_, tx), - )) => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); - }, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, - ) => { - let pov = if candidate_hash == candidate_a_hash { - &pov_a - } else if candidate_hash == candidate_b_hash { - &pov_b - } else { - panic!("unknown candidate hash") - }; - tx.send(pov.clone()).unwrap(); - }, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - candidate_receipt, - response_sender, - .. - }, - ) => { - let candidate_hash = candidate_receipt.hash(); - let (head_data, pvd) = if candidate_hash == candidate_a_hash { - (&head_data[1], &pvd_a) - } else if candidate_hash == candidate_b_hash { - (&head_data[2], &pvd_b) - } else { - panic!("unknown candidate hash") - }; - response_sender - .send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - pvd.clone(), - ))) - .unwrap(); - }, - AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { - tx, - .. 
- }) => { - tx.send(Ok(())).unwrap(); - }, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked(..), - ) => {}, - AllMessages::StatementDistribution(StatementDistributionMessage::Share( - _, - statement, - )) => { - assert_eq!(statement.validator_index(), ValidatorIndex(0)); - let payload = statement.payload(); - assert_matches!( - payload.clone(), - StatementWithPVD::Valid(hash) - if hash == candidate_a_hash || hash == candidate_b_hash => - { - assert!(valid_statements.insert(hash)); - } - ); - }, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed(hash)) => { - // Ensure that `Share` was received first for the candidate. - assert!(valid_statements.contains(&hash)); - backed_statements.insert(hash); - - if backed_statements.len() == 2 { - break - } - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionIndexForChild(tx), - )) => { - tx.send(Ok(1u32.into())).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionExecutorParams(sess_idx, tx), - )) => { - assert_eq!(sess_idx, 1); - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _parent, - RuntimeApiRequest::ValidatorGroups(tx), - )) => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::NodeFeatures(sess_idx, tx), - )) => { - assert_eq!(sess_idx, 1); - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - }, - _ => panic!("unexpected message received from overseer: {:?}", msg), - } - } - - assert!(valid_statements.contains(&candidate_a_hash)); - assert!(valid_statements.contains(&candidate_b_hash)); - 
assert!(backed_statements.contains(&candidate_a_hash)); - assert!(backed_statements.contains(&candidate_b_hash)); - - virtual_overseer - }); -} - -// Test that multiple candidates from different paras can occupy the same depth -// in a given relay parent. -#[test] -fn seconding_sanity_check_occupy_same_depth() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a parent of the activated `leaf`. - const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - - let para_id_a = test_state.chain_ids[0]; - let para_id_b = test_state.chain_ids[1]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; - let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data_a = test_state.head_data.get(¶_id_a).unwrap(); - let expected_head_data_b = test_state.head_data.get(¶_id_b).unwrap(); - - let pov_hash = pov.hash(); - let candidate_a = TestCandidateBuilder { - para_id: para_id_a, - relay_parent: leaf_parent, - pov_hash, - head_data: expected_head_data_a.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - }; - - let mut candidate_b = candidate_a.clone(); - candidate_b.para_id = para_id_b; - candidate_b.head_data = expected_head_data_b.clone(); - // A rotation happens, test validator is assigned to second para here. 
- candidate_b.relay_parent = leaf_hash; - - let candidate_a = (candidate_a.build(), expected_head_data_a, para_id_a); - let candidate_b = (candidate_b.build(), expected_head_data_b, para_id_b); - - for candidate in &[candidate_a, candidate_b] { - let (candidate, expected_head_data, para_id) = candidate; - let second = CandidateBackingMessage::Second( - leaf_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate.descriptor.relay_parent(), - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_hash), - }, - // Send the same membership for both candidates. - make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), - )]; - - assert_hypothetical_membership_requests( - &mut virtual_overseer, - expected_request_a.clone(), - ) - .await; - - // Prospective parachains are notified. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && &req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded( - &mut virtual_overseer, - &candidate.descriptor.relay_parent(), - ) + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(test_state.relay_parent), + ))) .await; - } - - virtual_overseer - }); -} - -// Test that the subsystem doesn't skip occupied cores assignments. -#[test] -fn occupied_core_assignment() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - let previous_para_id = test_state.chain_ids[1]; - - // Set the core state to occupied. 
- let mut candidate_descriptor = - polkadot_primitives_test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = previous_para_id; - test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { - group_responsible: Default::default(), - next_up_on_available: Some(ScheduledCore { para_id, collator: None }), - occupied_since: 100_u32, - time_out_at: 200_u32, - next_up_on_time_out: None, - availability: Default::default(), - candidate_descriptor: candidate_descriptor.into(), - candidate_hash: Default::default(), - }); - - let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - 
candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), - )]; - assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; - virtual_overseer }); } diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs new file mode 100644 index 000000000000..db5409ee4bd5 --- /dev/null +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -0,0 +1,1742 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Tests for the backing subsystem with enabled prospective parachains. + +use polkadot_node_subsystem::{ + messages::{ChainApiMessage, HypotheticalMembership}, + ActivatedLeaf, TimeoutExt, +}; +use polkadot_primitives::{vstaging::OccupiedCore, AsyncBackingParams, BlockNumber, Header}; + +use super::*; + +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; + +struct TestLeaf { + activated: ActivatedLeaf, + min_relay_parents: Vec<(ParaId, u32)>, +} + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) +} + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + leaf: TestLeaf, + test_state: &TestState, +) { + let TestLeaf { activated, min_relay_parents } = leaf; + let leaf_hash = activated.hash; + let leaf_number = activated.number; + // Start work on some new parent. + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + // Prospective parachains mode is temporarily defined by the Runtime API version. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == leaf_hash => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + let min_min = *min_relay_parents + .iter() + .map(|(_, block_num)| block_num) + .min() + .unwrap_or(&leaf_number); + + let ancestry_len = leaf_number + 1 - min_min; + + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_min..=leaf_number).rev(); + let ancestry_iter = ancestry_hashes.zip(ancestry_numbers).peekable(); + + let mut next_overseer_message = None; + // How many blocks were actually requested. 
+ let mut requested_len = 0; + { + let mut ancestry_iter = ancestry_iter.clone(); + while let Some((hash, number)) = ancestry_iter.next() { + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = virtual_overseer.recv().await; + // It may happen that some blocks were cached by implicit view, + // reuse the message. + if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + next_overseer_message.replace(msg); + break + } + + assert_matches!( + msg, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(_hash, tx) + ) if _hash == hash => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + + if requested_len == 0 { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents.clone()).unwrap(); + } + ); + } + + requested_len += 1; + } + } + + for (hash, number) in ancestry_iter.take(requested_len) { + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; + + // Check that subsystem job issues a request for the session index for child. + assert_matches!( + msg, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + } + ); + + // Check that subsystem job issues a request for a validator set. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the validator groups. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == hash => { + let (validator_groups, mut group_rotation_info) = test_state.validator_groups.clone(); + group_rotation_info.now = number; + tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + + // Check if subsystem job issues a request for the minimum backing votes. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::MinimumBackingVotes(session_index, tx), + )) if parent == hash && session_index == test_state.signing_context.session_index => { + tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); + } + ); + + // Check that subsystem job issues a request for the runtime version. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + // Check that the subsystem job issues a request for the disabled validators. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) + ) if parent == hash => { + tx.send(Ok(Vec::new())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == hash => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); + } +} + +async fn assert_validate_seconded_candidate( + virtual_overseer: &mut VirtualOverseer, + relay_parent: Hash, + candidate: &CommittedCandidateReceipt, + assert_pov: &PoV, + assert_pvd: &PersistedValidationData, + assert_validation_code: &ValidationCode, + expected_head_data: &HeadData, + fetch_pov: bool, +) { + assert_validation_requests(virtual_overseer, assert_validation_code.clone()).await; + + if fetch_pov { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent: hash, + tx, + .. 
+ } + ) if hash == relay_parent => { + tx.send(assert_pov.clone()).unwrap(); + } + ); + } + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, + candidate_receipt, + pov, + exec_kind, + response_sender, + .. + }) if &validation_data == assert_pvd && + &validation_code == assert_validation_code && + &*pov == assert_pov && + candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + response_sender.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + assert_pvd.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); +} + +async fn assert_hypothetical_membership_requests( + virtual_overseer: &mut VirtualOverseer, + mut expected_requests: Vec<( + HypotheticalMembershipRequest, + Vec<(HypotheticalCandidate, HypotheticalMembership)>, + )>, +) { + // Requests come with no particular order. 
+ let requests_num = expected_requests.len(); + + for _ in 0..requests_num { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), + ) => { + let idx = match expected_requests.iter().position(|r| r.0 == request) { + Some(idx) => idx, + None => + panic!( + "unexpected hypothetical membership request, no match found for {:?}", + request + ), + }; + let resp = std::mem::take(&mut expected_requests[idx].1); + tx.send(resp).unwrap(); + + expected_requests.remove(idx); + } + ); + } +} + +fn make_hypothetical_membership_response( + hypothetical_candidate: HypotheticalCandidate, + relay_parent_hash: Hash, +) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { + vec![(hypothetical_candidate, vec![relay_parent_hash])] +} + +// Test that `seconding_sanity_check` works when a candidate is allowed +// for all leaves. +#[test] +fn seconding_sanity_check_allowed_on_all() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + 
let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = + make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed +// for all leaves. +#[test] +fn seconding_sanity_check_disallowed() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. 
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: 
candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![(expected_request_a, expected_response_a)], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + let leaf_a_grandparent = get_parent_hash(leaf_a_parent); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_grandparent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second 
}).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_grandparent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate), + persisted_validation_data: pvd, + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_empty_response.clone()), + (expected_request_b, expected_empty_response), + ], + ) + .await; + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one +// leaf. +#[test] +fn seconding_sanity_check_allowed_on_at_least_one_leaf() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + 
let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + +// Test that a seconded candidate which is not approved by prospective parachains +// subsystem doesn't change the view. +#[test] +fn prospective_parachains_reject_candidate() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. 
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), + )]; + assert_hypothetical_membership_requests(&mut 
virtual_overseer, expected_request_a.clone()) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + // Reject it. + tx.send(false).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( + relay_parent, + candidate_receipt, + )) if candidate_receipt.descriptor == candidate.descriptor && + candidate_receipt.commitments_hash == candidate.commitments.hash() && + relay_parent == leaf_a_parent + ); + + // Try seconding the same candidate. + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + +// Test that a validator can second multiple candidates per single relay parent. +#[test] +fn second_multiple_candidates_per_relay_parent() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + }; + let mut candidate_b = candidate_a.clone(); + candidate_b.relay_parent = leaf_grandparent; + + let candidate_a = candidate_a.build(); + let candidate_b = candidate_b.build(); + + for candidate in &[candidate_a, candidate_b] { + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor.relay_parent(), + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + 
HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), + )]; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == candidate.descriptor.relay_parent() => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(candidate.descriptor.relay_parent(), hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + } + + virtual_overseer + }); +} + +// Test that the candidate reaches quorum successfully. +#[test] +fn backing_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + validation_code: validation_code.0.clone(), + persisted_validation_data_hash: pvd.hash(), + } + .build(); + + let candidate_a_hash = candidate_a.hash(); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .expect("Insert key into keystore"); + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. 
+ let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Valid(candidate_a_hash), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_a.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + // Prospective parachains are notified about candidate seconded first. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate_a.descriptor.relay_parent(), + &candidate_a, + &pov, + &pvd, + &validation_code, + expected_head_data, + true, + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(leaf_parent, hash); + } + ); + + // Prospective parachains and collator protocol are notified about candidate backed. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id + ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( + candidate_hash + )) if candidate_a_hash == candidate_hash + ); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + virtual_overseer + }); +} + +// Tests that validators start work on consecutive prospective parachain blocks. +#[test] +fn concurrent_dependent_candidates() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a grandparent of the activated `leaf`, + // candidate `b` -- in parent. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let head_data = &[ + HeadData(vec![10, 20, 30]), // Before `a`. + HeadData(vec![11, 21, 31]), // After `a`. + HeadData(vec![12, 22]), // After `b`. 
+ ]; + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = PersistedValidationData { + parent_head: head_data[0].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 2, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + + let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; + let pvd_b = PersistedValidationData { + parent_head: head_data[1].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 1, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + let validation_code = ValidationCode(vec![1, 2, 3]); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_grandparent, + pov_hash: pov_a.hash(), + head_data: head_data[1].clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash: pov_b.hash(), + head_data: head_data[2].clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + persisted_validation_data_hash: pvd_b.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .expect("Insert key into keystore"); + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. 
+ let signing_context = + SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); + let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + + // At this point the subsystem waits for response, the previous message is received, + // send a second one without blocking. + let _ = virtual_overseer + .tx + .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); + + let mut valid_statements = HashSet::new(); + let mut backed_statements = HashSet::new(); + + loop { + let msg = virtual_overseer + .recv() + .timeout(std::time::Duration::from_secs(1)) + .await + .expect("overseer recv timed out"); + + // Order is not guaranteed since we have 2 statements being handled concurrently. 
+ match msg { + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), + ) => { + tx.send(true).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + )) => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, + ) => { + let pov = if candidate_hash == candidate_a_hash { + &pov_a + } else if candidate_hash == candidate_b_hash { + &pov_b + } else { + panic!("unknown candidate hash") + }; + tx.send(pov.clone()).unwrap(); + }, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + candidate_receipt, + response_sender, + .. + }, + ) => { + let candidate_hash = candidate_receipt.hash(); + let (head_data, pvd) = if candidate_hash == candidate_a_hash { + (&head_data[1], &pvd_a) + } else if candidate_hash == candidate_b_hash { + (&head_data[2], &pvd_b) + } else { + panic!("unknown candidate hash") + }; + response_sender + .send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + }, + AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { + tx, + .. 
+ }) => { + tx.send(Ok(())).unwrap(); + }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked(..), + ) => {}, + AllMessages::StatementDistribution(StatementDistributionMessage::Share( + _, + statement, + )) => { + assert_eq!(statement.validator_index(), ValidatorIndex(0)); + let payload = statement.payload(); + assert_matches!( + payload.clone(), + StatementWithPVD::Valid(hash) + if hash == candidate_a_hash || hash == candidate_b_hash => + { + assert!(valid_statements.insert(hash)); + } + ); + }, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed(hash)) => { + // Ensure that `Share` was received first for the candidate. + assert!(valid_statements.contains(&hash)); + backed_statements.insert(hash); + + if backed_statements.len() == 2 { + break + } + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + tx.send(Ok(1u32.into())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, + _ => panic!("unexpected message received from overseer: {:?}", msg), + } + } + + assert!(valid_statements.contains(&candidate_a_hash)); + assert!(valid_statements.contains(&candidate_b_hash)); + 
assert!(backed_statements.contains(&candidate_a_hash)); + assert!(backed_statements.contains(&candidate_b_hash)); + + virtual_overseer + }); +} + +// Test that multiple candidates from different paras can occupy the same depth +// in a given relay parent. +#[test] +fn seconding_sanity_check_occupy_same_depth() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + + let para_id_a = test_state.chain_ids[0]; + let para_id_b = test_state.chain_ids[1]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; + let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data_a = test_state.head_data.get(¶_id_a).unwrap(); + let expected_head_data_b = test_state.head_data.get(¶_id_b).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id: para_id_a, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data_a.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + }; + + let mut candidate_b = candidate_a.clone(); + candidate_b.para_id = para_id_b; + candidate_b.head_data = expected_head_data_b.clone(); + // A rotation happens, test validator is assigned to second para here. 
+ candidate_b.relay_parent = leaf_hash; + + let candidate_a = (candidate_a.build(), expected_head_data_a, para_id_a); + let candidate_b = (candidate_b.build(), expected_head_data_b, para_id_b); + + for candidate in &[candidate_a, candidate_b] { + let (candidate, expected_head_data, para_id) = candidate; + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor.relay_parent(), + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_hash), + }, + // Send the same membership for both candidates. + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), + )]; + + assert_hypothetical_membership_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; + + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == candidate.descriptor.relay_parent() => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(candidate.descriptor.relay_parent(), hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + } + + virtual_overseer + }); +} + +// Test that the subsystem doesn't skip occupied cores assignments. +#[test] +fn occupied_core_assignment() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; + + // Set the core state to occupied. 
+ let mut candidate_descriptor = + polkadot_primitives_test_helpers::dummy_candidate_descriptor(Hash::zero()); + candidate_descriptor.para_id = previous_para_id; + test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { + group_responsible: Default::default(), + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 100_u32, + time_out_at: 200_u32, + next_up_on_time_out: None, + availability: Default::default(), + candidate_descriptor: candidate_descriptor.into(), + candidate_hash: Default::default(), + }); + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + 
candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), + )]; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index e75404729dbd..126a18a14166 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -14,12 +12,12 @@ workspace = true [dependencies] futures = { workspace = true } gum = { workspace = true, default-features = true } +polkadot-primitives = { 
workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -thiserror = { workspace = true } wasm-timer = { workspace = true } +thiserror = { workspace = true } [dev-dependencies] polkadot-node-subsystem-test-helpers = { workspace = true } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index e92976609f9e..87855dbce415 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -17,28 +15,28 @@ futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -codec = { features = ["bit-vec", "derive"], workspace = true } -sp-application-crypto = { workspace = true } sp-keystore = { workspace = true } +sp-application-crypto = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } -polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, 
default-features = true } -polkadot-primitives = { workspace = true, default-features = true } [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies] polkadot-node-core-pvf = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } +sp-keyring = { workspace = true, default-features = true } futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } rstest = { workspace = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, features = ["test"] } diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 0689a41233c7..a8e911e0c5c9 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -21,11 +19,11 @@ sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } [dev-dependencies] -codec = { workspace = true, default-features = true } futures = { features = ["thread-pool"], workspace = true } maplit = { workspace = true } +codec = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index e425b9f862a5..755d5cadeaaf 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -5,27 +5,25 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -kvdb = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +kvdb = { workspace = true } thiserror = { workspace = true } +codec = { workspace = 
true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } -kvdb-memorydb = { workspace = true } -parking_lot = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 6eb3020a0432..344b66af1933 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -5,41 +5,39 @@ description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { workspace = true, default-features = true } -fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } kvdb = { workspace = true } -schnellru = { workspace = true } thiserror = { workspace = true } +schnellru = { workspace = true } +fatality = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } -futures-timer = { workspace = true } kvdb-memorydb = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { 
workspace = true, features = ["test"] } -polkadot-primitives-test-helpers = { workspace = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +futures-timer = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, features = ["test"] } [features] # If not enabled, the dispute coordinator will do nothing. diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 264b8da2b44d..1e4953f40d0b 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -5,20 +5,18 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } +thiserror = { workspace = true } +async-trait = { workspace = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } -thiserror = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml 
b/polkadot/node/core/prospective-parachains/Cargo.toml index 0d0ede8d1d9b..5629e4ef7fbe 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -5,28 +5,26 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } thiserror = { workspace = true } +fatality = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } +sp-tracing = { workspace = true } +sp-core = { workspace = true, default-features = true } rand = { workspace = true } rstest = { workspace = true } -sp-core = { workspace = true, default-features = true } -sp-tracing = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index ded0a3ab73b2..265d1498ee96 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -630,7 +630,7 @@ impl BackedChain { ) -> impl Iterator + 'a { let mut found_index = None; for index in 0..self.chain.len() { - let node 
= &self.chain[index]; + let node = &self.chain[index]; if found_index.is_some() { self.by_parent_head.remove(&node.parent_head_data_hash); diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 624dd74132c1..2f8a5525570c 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -1165,9 +1165,8 @@ fn test_populate_and_check_potential() { Err(Error::CandidateAlreadyKnown) ); - // Simulate some best chain reorgs. + // Simulate a best chain reorg by backing a2. { - // Back A2. The reversion should happen right at the root. let mut chain = chain.clone(); chain.candidate_backed(&candidate_a2_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); @@ -1186,66 +1185,6 @@ fn test_populate_and_check_potential() { chain.can_add_candidate_as_potential(&candidate_a_entry), Err(Error::ForkChoiceRule(_)) ); - - // Simulate a more complex chain reorg. - // A2 points to B2, which is backed. - // A2 has underneath a subtree A2 -> B2 -> C3 and A2 -> B2 -> C4. B2 and C3 are backed. C4 - // is kept because it has a lower candidate hash than C3. Backing C4 will cause a chain - // reorg. - - // Candidate C3. - let (pvd_c3, candidate_c3) = make_committed_candidate( - para_id, - relay_parent_y_info.hash, - relay_parent_y_info.number, - vec![0xb4].into(), - vec![0xc2].into(), - relay_parent_y_info.number, - ); - let candidate_c3_hash = candidate_c3.hash(); - let candidate_c3_entry = - CandidateEntry::new(candidate_c3_hash, candidate_c3, pvd_c3, CandidateState::Seconded) - .unwrap(); - - // Candidate C4. 
- let (pvd_c4, candidate_c4) = make_committed_candidate( - para_id, - relay_parent_y_info.hash, - relay_parent_y_info.number, - vec![0xb4].into(), - vec![0xc3].into(), - relay_parent_y_info.number, - ); - let candidate_c4_hash = candidate_c4.hash(); - // C4 should have a lower candidate hash than C3. - assert_eq!(fork_selection_rule(&candidate_c4_hash, &candidate_c3_hash), Ordering::Less); - let candidate_c4_entry = - CandidateEntry::new(candidate_c4_hash, candidate_c4, pvd_c4, CandidateState::Seconded) - .unwrap(); - - let mut storage = storage.clone(); - storage.add_candidate_entry(candidate_c3_entry).unwrap(); - storage.add_candidate_entry(candidate_c4_entry).unwrap(); - let mut chain = populate_chain_from_previous_storage(&scope, &storage); - chain.candidate_backed(&candidate_a2_hash); - chain.candidate_backed(&candidate_c3_hash); - - assert_eq!( - chain.best_chain_vec(), - vec![candidate_a2_hash, candidate_b2_hash, candidate_c3_hash] - ); - - // Backing C4 will cause a reorg. - chain.candidate_backed(&candidate_c4_hash); - assert_eq!( - chain.best_chain_vec(), - vec![candidate_a2_hash, candidate_b2_hash, candidate_c4_hash] - ); - - assert_eq!( - chain.unconnected().map(|c| c.candidate_hash).collect::>(), - [candidate_f_hash].into_iter().collect() - ); } // Candidate F has an invalid hrmp watermark. 
however, it was not checked beforehand as we don't diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index a3880d5a0f13..64a598b420f7 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -5,30 +5,28 @@ description = "Responsible for assembling a relay chain block from a set of avai authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] bitvec = { features = ["alloc"], workspace = true } -fatality = { workspace = true } futures = { workspace = true } -futures-timer = { workspace = true } gum = { workspace = true, default-features = true } +thiserror = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +futures-timer = { workspace = true } +fatality = { workspace = true } schnellru = { workspace = true } -thiserror = { workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } -polkadot-primitives-test-helpers = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } rstest = { workspace = true } diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index fac5f85b6b56..73ef17a2843a 100644 --- 
a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -5,31 +5,29 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] futures = { workspace = true } -gum = { workspace = true, default-features = true } thiserror = { workspace = true } +gum = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -futures-timer = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } -sc-keystore = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +futures-timer = { workspace = true } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index f47f7b734285..a9f97c308f26 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true 
license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -23,28 +21,27 @@ is_executable = { optional = true, workspace = true } pin-project = { workspace = true } rand = { workspace = true, default-features = true } slotmap = { workspace = true } -strum = { features = ["derive"], workspace = true, default-features = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { features = ["fs", "process"], workspace = true, default-features = true } +strum = { features = ["derive"], workspace = true, default-features = true } codec = { features = [ "derive", ], workspace = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } polkadot-node-core-pvf-common = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } -polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } -sc-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/polkadot/node/core/pvf/common/Cargo.toml 
b/polkadot/node/core/pvf/common/Cargo.toml index d058d582fc26..903c8dd1af29 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 4df425dfd199..6ad340d25612 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -5,18 +5,16 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -cfg-if = { workspace = true } cpu-time = { workspace = true } gum = { workspace = true, default-features = true } -libc = { workspace = true } +cfg-if = { workspace = true } nix = { features = ["process", "resource", "sched"], workspace = true } +libc = { workspace = true } codec = { features = ["derive"], workspace = true } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index aa551c196c37..56235bd82192 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,11 +14,11 @@ blake3 = { workspace = true } cfg-if = { workspace = true } gum = { workspace = true, default-features = true } libc = { workspace = true } -nix = { features = ["process", "resource", "sched"], workspace = true } rayon = { workspace = true } +tracking-allocator = { workspace = true, default-features = true } tikv-jemalloc-ctl = { optional = true, workspace = 
true } tikv-jemallocator = { optional = true, workspace = true } -tracking-allocator = { workspace = true, default-features = true } +nix = { features = ["process", "resource", "sched"], workspace = true } codec = { features = ["derive"], workspace = true } diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index 69355b8fd55d..6d27ab0261d9 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -305,6 +305,8 @@ impl Queue { for hash in &update.deactivated { let _ = self.active_leaves.remove(&hash); } + + gum::debug!(target: LOG_TARGET, size = ?self.active_leaves.len(), "Active leaves pruned"); } fn insert_active_leaf(&mut self, update: ActiveLeavesUpdate, ancestors: Vec) { diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index f279fbb53544..e63778d4692f 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -237,8 +237,10 @@ impl WorkerHandle { // Clear all env vars from the spawned process. let mut command = process::Command::new(program.as_ref()); command.env_clear(); - - command.env("RUST_LOG", sc_tracing::logging::get_directives().join(",")); + // Add back any env vars we want to keep. 
+ if let Ok(value) = std::env::var("RUST_LOG") { + command.env("RUST_LOG", value); + } let mut child = command .args(extra_args) diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 65c92dc5c070..834e4b300b9e 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -5,8 +5,6 @@ description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -18,17 +16,17 @@ schnellru = { workspace = true } sp-consensus-babe = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } [dev-dependencies] +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } -sp-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index f4c22dd7595e..9b2df435a06a 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -5,14 +5,12 
@@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] coarsetime = { workspace = true } +tracing = { workspace = true, default-features = true } gum-proc-macro = { workspace = true, default-features = true } polkadot-primitives = { features = ["std"], workspace = true, default-features = true } -tracing = { workspace = true, default-features = true } diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 0b69d8b67cf1..da6364977cae 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -18,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -expander = { workspace = true } -proc-macro-crate = { workspace = true } -proc-macro2 = { workspace = true } -quote = { workspace = true } syn = { features = ["extra-traits", "full"], workspace = true } +quote = { workspace = true } +proc-macro2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 84a58f382e20..49434606a61c 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -29,27 +29,27 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] +polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-node-subsystem = { 
workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { workspace = true, default-features = true } +polkadot-node-core-backing = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +color-eyre = { workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } -color-eyre = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-core-backing = { workspace = true, default-features = true } -polkadot-node-core-candidate-validation = { workspace = true, default-features = true } -polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = 
true } rand = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } # Required for worker binaries to build. polkadot-node-core-pvf-common = { workspace = true, default-features = true } @@ -57,9 +57,9 @@ polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = t polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } [dev-dependencies] -futures = { features = ["thread-pool"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } [build-dependencies] substrate-build-script-utils = { workspace = true, default-features = true } diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 454337cb63f8..41b08b66e9b4 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -18,28 +16,28 @@ gum = { workspace = true, default-features = true } metered = { features = ["futures_channel"], workspace = true } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. 
-sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } -bs58 = { features = ["alloc"], workspace = true, default-features = true } -codec = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +bs58 = { features = ["alloc"], workspace = true, default-features = true } +log = { workspace = true, default-features = true } [dev-dependencies] assert_cmd = { workspace = true } -http-body-util = { workspace = true } -hyper = { workspace = true } +tempfile = { workspace = true } hyper-util = { features = ["client-legacy", "tokio"], workspace = true } +hyper = { workspace = true } +http-body-util = { workspace = true } +tokio = { workspace = true, default-features = true } polkadot-test-service = { features = ["runtime-metrics"], workspace = true } -prometheus-parse = { workspace = true } +substrate-test-utils = { workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -substrate-test-utils = { workspace = true } -tempfile = { workspace = true } -tokio = { workspace = true, default-features = true } +prometheus-parse = { workspace = true } [features] default = [] diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index 43dce0ec2ffe..4760138058eb 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -21,7 +21,7 @@ use hyper::Uri; use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use 
polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; -use sp_keyring::Sr25519Keyring::*; +use sp_keyring::AccountKeyring::*; use std::collections::HashMap; const DEFAULT_PROMETHEUS_PORT: u16 = 9616; diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index d9d3fd8635a6..8d674a733470 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -5,14 +5,11 @@ description = "Polkadot Approval Distribution subsystem for the distribution of authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -itertools = { workspace = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } @@ -20,11 +17,12 @@ polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +itertools = { workspace = true } -bitvec = { features = ["alloc"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } [dev-dependencies] sc-keystore = { workspace = true } @@ -38,7 +36,7 @@ polkadot-primitives-test-helpers = { workspace = true } assert_matches = { workspace = true } schnorrkel = { workspace = true } # rand_core should match schnorrkel -log = { workspace = true, default-features = true } -rand_chacha = { workspace = 
true, default-features = true } rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } sp-tracing = { workspace = true } +log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index cefb1d744992..876cc59b9c28 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -163,6 +163,8 @@ enum ApprovalEntryError { InvalidCandidateIndex, DuplicateApproval, UnknownAssignment, + #[allow(dead_code)] + AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } impl ApprovalEntry { @@ -316,7 +318,7 @@ impl Default for AggressionConfig { fn default() -> Self { AggressionConfig { l1_threshold: Some(16), - l2_threshold: Some(64), + l2_threshold: Some(28), resend_unfinalized_period: Some(8), } } @@ -512,8 +514,6 @@ struct BlockEntry { vrf_story: RelayVRFStory, /// The block slot. slot: Slot, - /// Backing off from re-sending messages to peers. 
- last_resent_at_block_number: Option, } impl BlockEntry { @@ -568,7 +568,7 @@ impl BlockEntry { &mut self, approval: IndirectSignedApprovalVoteV2, ) -> Result<(RequiredRouting, HashSet), ApprovalEntryError> { - let mut required_routing: Option = None; + let mut required_routing = None; let mut peers_randomly_routed_to = HashSet::new(); if self.candidates.len() < approval.candidate_indices.len() as usize { @@ -595,11 +595,16 @@ impl BlockEntry { peers_randomly_routed_to .extend(approval_entry.routing_info().peers_randomly_routed.iter()); - if let Some(current_required_routing) = required_routing { - required_routing = Some( - current_required_routing - .combine(approval_entry.routing_info().required_routing), - ); + if let Some(required_routing) = required_routing { + if required_routing != approval_entry.routing_info().required_routing { + // This shouldn't happen since the required routing is computed based on the + // validator_index, so two assignments from the same validators will have + // the same required routing. + return Err(ApprovalEntryError::AssignmentsFollowedDifferentPaths( + required_routing, + approval_entry.routing_info().required_routing, + )) + } } else { required_routing = Some(approval_entry.routing_info().required_routing) } @@ -880,7 +885,6 @@ impl State { candidates_metadata: meta.candidates, vrf_story: meta.vrf_story, slot: meta.slot, - last_resent_at_block_number: None, }); self.topologies.inc_session_refs(meta.session); @@ -1320,33 +1324,6 @@ impl State { self.enable_aggression(network_sender, Resend::No, metrics).await; } - // When finality is lagging as a last resort nodes start sending the messages they have - // multiples times. This means it is safe to accept duplicate messages without punishing the - // peer and reduce the reputation and can end up banning the Peer, which in turn will create - // more no-shows. 
- fn accept_duplicates_from_validators( - blocks_by_number: &BTreeMap>, - topologies: &SessionGridTopologies, - aggression_config: &AggressionConfig, - entry: &BlockEntry, - peer: PeerId, - ) -> bool { - let topology = topologies.get_topology(entry.session); - let min_age = blocks_by_number.iter().next().map(|(num, _)| num); - let max_age = blocks_by_number.iter().rev().next().map(|(num, _)| num); - - // Return if we don't have at least 1 block. - let (min_age, max_age) = match (min_age, max_age) { - (Some(min), Some(max)) => (*min, *max), - _ => return false, - }; - - let age = max_age.saturating_sub(min_age); - - aggression_config.should_trigger_aggression(age) && - topology.map(|topology| topology.is_validator(&peer)).unwrap_or(false) - } - async fn import_and_circulate_assignment( &mut self, approval_voting_sender: &mut A, @@ -1411,29 +1388,20 @@ impl State { if peer_knowledge.contains(&message_subject, message_kind) { // wasn't included before if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { - if !Self::accept_duplicates_from_validators( - &self.blocks_by_number, - &self.topologies, - &self.aggression_config, - entry, - peer_id, - ) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Duplicate assignment", - ); - - modify_reputation( - &mut self.reputation, - network_sender, - peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; - } + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Duplicate assignment", + ); + modify_reputation( + &mut self.reputation, + network_sender, + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; metrics.on_assignment_duplicate(); } else { gum::trace!( @@ -1749,9 +1717,6 @@ impl State { assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, approval_knowledge_key: &(MessageSubject, MessageKind), entry: &mut BlockEntry, - blocks_by_number: &BTreeMap>, - topologies: &SessionGridTopologies, - aggression_config: &AggressionConfig, reputation: &mut 
ReputationAggregator, peer_id: PeerId, metrics: &Metrics, @@ -1780,27 +1745,20 @@ impl State { .received .insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1) { - if !Self::accept_duplicates_from_validators( - blocks_by_number, - topologies, - aggression_config, - entry, + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Duplicate approval", + ); + + modify_reputation( + reputation, + network_sender, peer_id, - ) { - gum::trace!( - target: LOG_TARGET, - ?peer_id, - ?approval_knowledge_key, - "Duplicate approval", - ); - modify_reputation( - reputation, - network_sender, - peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; - } + COST_DUPLICATE_MESSAGE, + ) + .await; metrics.on_approval_duplicate(); } return false @@ -1892,9 +1850,6 @@ impl State { &assignments_knowledge_keys, &approval_knwowledge_key, entry, - &self.blocks_by_number, - &self.topologies, - &self.aggression_config, &mut self.reputation, peer_id, metrics, @@ -2305,43 +2260,18 @@ impl State { &self.topologies, |block_entry| { let block_age = max_age - block_entry.number; - // We want to resend only for blocks of min_age, there is no point in - // resending for blocks newer than that, because we are just going to create load - // and not gain anything. - let diff_from_min_age = block_entry.number - min_age; - - // We want to back-off on resending for blocks that have been resent recently, to - // give time for nodes to process all the extra messages, if we still have not - // finalized we are going to resend again after unfinalized_period * 2 since the - // last resend. 
- let blocks_since_last_sent = block_entry - .last_resent_at_block_number - .map(|last_resent_at_block_number| max_age - last_resent_at_block_number); - - let can_resend_at_this_age = blocks_since_last_sent - .zip(config.resend_unfinalized_period) - .map(|(blocks_since_last_sent, unfinalized_period)| { - blocks_since_last_sent >= unfinalized_period * 2 - }) - .unwrap_or(true); if resend == Resend::Yes && - config.resend_unfinalized_period.as_ref().map_or(false, |p| { - block_age > 0 && - block_age % p == 0 && diff_from_min_age == 0 && - can_resend_at_this_age - }) { + config + .resend_unfinalized_period + .as_ref() + .map_or(false, |p| block_age > 0 && block_age % p == 0) + { // Retry sending to all peers. for (_, knowledge) in block_entry.known_by.iter_mut() { knowledge.sent = Knowledge::default(); } - block_entry.last_resent_at_block_number = Some(max_age); - gum::debug!( - target: LOG_TARGET, - block_number = ?block_entry.number, - ?max_age, - "Aggression enabled with resend for block", - ); + true } else { false diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 323b2cb08fec..063e71f2f528 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -1030,141 +1030,6 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { ); } -#[test] -fn peer_sending_us_duplicates_while_aggression_enabled_is_ok() { - let parent_hash = Hash::repeat_byte(0xFF); - let hash = Hash::repeat_byte(0xAA); - - let peers = make_peers_and_authority_ids(8); - let peer_a = peers.first().unwrap().0; - - let _ = test_harness( - Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Arc::new(SystemClock {}), - state_without_reputation_delay(), - |mut virtual_overseer| async move { - let overseer = &mut virtual_overseer; - let peer = &peer_a; - setup_peer_with_view(overseer, peer, view![], ValidationVersion::V3).await; - - let 
peers_with_optional_peer_id = peers - .iter() - .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) - .collect_vec(); - // Setup a topology where peer_a is neighbor to current node. - setup_gossip_topology( - overseer, - make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), - ) - .await; - - // new block `hash` with 1 candidates - let meta = BlockApprovalMeta { - hash, - parent_hash, - number: 1, - candidates: vec![Default::default(); 1], - slot: 1.into(), - session: 1, - vrf_story: RelayVRFStory(Default::default()), - }; - let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); - overseer_send(overseer, msg).await; - - // import an assignment related to `hash` locally - let validator_index = ValidatorIndex(0); - let candidate_indices: CandidateBitfield = - vec![0 as CandidateIndex].try_into().unwrap(); - let candidate_bitfields = vec![CoreIndex(0)].try_into().unwrap(); - let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); - overseer_send( - overseer, - ApprovalDistributionMessage::DistributeAssignment( - cert.clone().into(), - candidate_indices.clone(), - ), - ) - .await; - - // update peer view to include the hash - overseer_send( - overseer, - ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(*peer, view![hash]), - ), - ) - .await; - - // we should send them the assignment - assert_matches!( - overseer_recv(overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( - protocol_v3::ApprovalDistributionMessage::Assignments(assignments) - )) - )) => { - assert_eq!(peers.len(), 1); - assert_eq!(assignments.len(), 1); - } - ); - - // but if someone else is sending it the same assignment - // the peer could send us it as well - let assignments = vec![(cert, candidate_indices)]; - let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments); 
- send_message_from_peer_v3(overseer, peer, msg.clone()).await; - - assert!( - overseer.recv().timeout(TIMEOUT).await.is_none(), - "we should not punish the peer" - ); - - // send the assignments again - send_message_from_peer_v3(overseer, peer, msg.clone()).await; - - // now we should - expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; - - // Peers will be continously punished for sending duplicates until approval-distribution - // aggression kicks, at which point they aren't anymore. - let mut parent_hash = hash; - for level in 0..16 { - // As long as the lag is bellow l1 aggression, punish peers for duplicates. - send_message_from_peer_v3(overseer, peer, msg.clone()).await; - expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; - - let number = 1 + level + 1; // first block had number 1 - let hash = BlakeTwo256::hash_of(&(parent_hash, number)); - let meta = BlockApprovalMeta { - hash, - parent_hash, - number, - candidates: vec![], - slot: (level as u64).into(), - session: 1, - vrf_story: RelayVRFStory(Default::default()), - }; - - let msg = ApprovalDistributionMessage::ApprovalCheckingLagUpdate(level + 1); - overseer_send(overseer, msg).await; - - let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); - overseer_send(overseer, msg).await; - - parent_hash = hash; - } - - // send the assignments again, we should not punish the peer because aggression is - // enabled. - send_message_from_peer_v3(overseer, peer, msg).await; - - assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); - virtual_overseer - }, - ); -} - #[test] fn import_approval_happy_path_v1_v2_peers() { let peers = make_peers_and_authority_ids(15); @@ -4027,7 +3892,7 @@ fn resends_messages_periodically() { // Add blocks until resend is done. 
{ let mut parent_hash = hash; - for level in 0..4 { + for level in 0..2 { number = number + 1; let hash = BlakeTwo256::hash_of(&(parent_hash, number)); let meta = BlockApprovalMeta { diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 7de8cb191599..8c5574f244e4 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -5,42 +5,40 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["std"], workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } -fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -schnellru = { workspace = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } +rand = { workspace = true, default-features = true } +derive_more = { 
workspace = true, default-features = true } +schnellru = { workspace = true } +fatality = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -polkadot-subsystem-bench = { workspace = true } -rstest = { workspace = true } -sc-network = { workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 8d4e6893b0a5..41f09b1f7044 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -5,42 +5,40 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -async-trait = { workspace = true } -fatality = { workspace = true } futures = { workspace = true } -gum = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } schnellru = { workspace = true } +rand = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } -tokio = { workspace = true, default-features = true } +async-trait = { workspace = true } +gum = { workspace = true, default-features = true } 
-codec = { features = ["derive"], workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } sc-network = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } futures-timer = { workspace = true } -log = { workspace = true, default-features = true } rstest = { workspace = true } +log = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 74a205276906..6d007255c574 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -5,8 +5,6 @@ description = "Polkadot Bitfiled Distribution subsystem, which gossips signed av 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,21 +14,21 @@ always-assert = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } -bitvec = { features = ["alloc"], workspace = true } -maplit = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -rand_chacha = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +sp-core = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +maplit = { workspace = true } sp-tracing = { workspace = true } +assert_matches = { workspace = true } +rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index cdc1bc3f6c1b..b4b5743853cd 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -5,8 +5,6 @@ 
description = "The Network Bridge Subsystem — protocol multiplexer for Polkado authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -14,26 +12,26 @@ workspace = true [dependencies] always-assert = { workspace = true } async-trait = { workspace = true } -bytes = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +futures-timer = { workspace = true } 
+polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index a02b281b6fc4..304cb23bb6aa 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -5,8 +5,6 @@ description = "Polkadot Collator Protocol subsystem. Allows collators and valida authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -19,28 +17,28 @@ gum = { workspace = true, default-features = true } schnellru.workspace = true sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } -fatality = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } tokio-util = { workspace = true } [dev-dependencies] +sp-tracing = { workspace = true } assert_matches = { workspace = true } rstest = { workspace = true } -sp-tracing = { workspace = true } -codec = { features = ["std"], workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } 
sp-keyring = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index d77480272cb4..504b0d716043 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -899,7 +899,7 @@ async fn process_msg( ); } }, - msg @ (Invalid(..) | Seconded(..)) => { + msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => { gum::warn!( target: LOG_TARGET, "{:?} message is not expected on the collator side of the protocol", diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs index 97fd4076bb8f..ae7f9a8c1fbc 100644 --- a/polkadot/node/network/collator-protocol/src/error.rs +++ b/polkadot/node/network/collator-protocol/src/error.rs @@ -70,9 +70,6 @@ pub enum Error { #[error("Response receiver for claim queue request cancelled")] CancelledClaimQueue(oneshot::Canceled), - - #[error("No state for the relay parent")] - RelayParentStateNotFound, } /// An error happened on the validator side of the protocol when attempting @@ -125,7 +122,7 @@ impl SecondingError { PersistedValidationDataMismatch | CandidateHashMismatch | RelayParentMismatch | - ParentHeadDataMismatch | + Duplicate | ParentHeadDataMismatch | InvalidCoreIndex(_, _) | InvalidSessionIndex(_, _) | InvalidReceiptVersion(_) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs deleted file mode 100644 index 
3a34cf52fec6..000000000000 --- a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs +++ /dev/null @@ -1,1055 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! `ClaimQueueState` tracks the state of the claim queue over a set of relay blocks. Refer to -//! [`ClaimQueueState`] for more details. - -use std::collections::VecDeque; - -use crate::LOG_TARGET; -use polkadot_primitives::{Hash, Id as ParaId}; - -/// Represents a single claim from the claim queue, mapped to the relay chain block where it could -/// be backed on-chain. -#[derive(Debug, PartialEq)] -struct ClaimInfo { - // Hash of the relay chain block. Can be `None` if it is still not known (a future block). - hash: Option, - /// Represents the `ParaId` scheduled for the block. Can be `None` if nothing is scheduled. - claim: Option, - /// The length of the claim queue at the block. It is used to determine the 'block window' - /// where a claim can be made. - claim_queue_len: usize, - /// A flag that indicates if the slot is claimed or not. - claimed: bool, -} - -/// Tracks the state of the claim queue over a set of relay blocks. 
-/// -/// Generally the claim queue represents the `ParaId` that should be scheduled at the current block -/// (the first element of the claim queue) and N other `ParaId`s which are supposed to be scheduled -/// on the next relay blocks. In other words the claim queue is a rolling window giving a hint what -/// should be built/fetched/accepted (depending on the context) at each block. -/// -/// Since the claim queue peeks into the future blocks there is a relation between the claim queue -/// state between the current block and the future blocks. -/// Let's see an example with 2 co-scheduled parachains: -/// - relay parent 1; Claim queue: [A, B, A] -/// - relay parent 2; Claim queue: [B, A, B] -/// - relay parent 3; Claim queue: [A, B, A] -/// - and so on -/// -/// Note that at rp1 the second element in the claim queue is equal to the first one in rp2. Also -/// the third element of the claim queue at rp1 is equal to the second one in rp2 and the first one -/// in rp3. -/// -/// So if we want to claim the third slot at rp 1 we are also claiming the second at rp2 and first -/// at rp3. To track this in a simple way we can project the claim queue onto the relay blocks like -/// this: -/// [A] [B] [A] -> this is the claim queue at rp3 -/// [B] [A] [B] -> this is the claim queue at rp2 -/// [A] [B] [A] -> this is the claim queue at rp1 -/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP x and RP Y are future blocks -/// -/// Note that the claims at each column are the same so we can simplify this by just projecting a -/// single claim over a block: -/// [A] [B] [A] [B] [A] -> claims effectively are the same -/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP x and RP Y are future blocks -/// -/// Basically this is how `ClaimQueueState` works. It keeps track of claims at each block by mapping -/// claims to relay blocks. -/// -/// How making a claim works? -/// At each relay block we keep track how long is the claim queue. 
This is a 'window' where we can -/// make a claim. So adding a claim just looks for a free spot at this window and claims it. -/// -/// Note on adding a new leaf. -/// When a new leaf is added we check if the first element in its claim queue matches with the -/// projection on the first element in 'future blocks'. If yes - the new relay block inherits this -/// claim. If not - this means that the claim queue changed for some reason so the claim can't be -/// inherited. This should not happen under normal circumstances. But if it happens it means that we -/// have got one claim which won't be satisfied in the worst case scenario. -pub(crate) struct ClaimQueueState { - block_state: VecDeque, - future_blocks: VecDeque, -} - -impl ClaimQueueState { - pub(crate) fn new() -> Self { - Self { block_state: VecDeque::new(), future_blocks: VecDeque::new() } - } - - // Appends a new leaf - pub(crate) fn add_leaf(&mut self, hash: &Hash, claim_queue: &Vec) { - if self.block_state.iter().any(|s| s.hash == Some(*hash)) { - return - } - - // First check if our view for the future blocks is consistent with the one in the claim - // queue of the new block. If not - the claim queue has changed for some reason and we need - // to readjust our view. - for (idx, expected_claim) in claim_queue.iter().enumerate() { - match self.future_blocks.get_mut(idx) { - Some(future_block) => - if future_block.claim.as_ref() != Some(expected_claim) { - // There is an inconsistency. Update our view with the one from the claim - // queue. `claimed` can't be true anymore since the `ParaId` has changed. - future_block.claimed = false; - future_block.claim = Some(*expected_claim); - }, - None => { - self.future_blocks.push_back(ClaimInfo { - hash: None, - claim: Some(*expected_claim), - // For future blocks we don't know the size of the claim queue. - // `claim_queue_len` could be an option but there is not much benefit from - // the extra boilerplate code to handle it. 
We set it to one since we - // usually know about one claim at each future block but this value is not - // used anywhere in the code. - claim_queue_len: 1, - claimed: false, - }); - }, - } - } - - // Now pop the first future block and add it as a leaf - let claim_info = if let Some(new_leaf) = self.future_blocks.pop_front() { - ClaimInfo { - hash: Some(*hash), - claim: claim_queue.first().copied(), - claim_queue_len: claim_queue.len(), - claimed: new_leaf.claimed, - } - } else { - // maybe the claim queue was empty but we still need to add a leaf - ClaimInfo { - hash: Some(*hash), - claim: claim_queue.first().copied(), - claim_queue_len: claim_queue.len(), - claimed: false, - } - }; - - // `future_blocks` can't be longer than the length of the claim queue at the last block - 1. - // For example this can happen if at relay block N we have got a claim queue of a length 4 - // and it's shrunk to 2. - self.future_blocks.truncate(claim_queue.len().saturating_sub(1)); - - self.block_state.push_back(claim_info); - } - - fn get_window<'a>( - &'a mut self, - relay_parent: &'a Hash, - ) -> impl Iterator + 'a { - let mut window = self - .block_state - .iter_mut() - .skip_while(|b| b.hash != Some(*relay_parent)) - .peekable(); - let cq_len = window.peek().map_or(0, |b| b.claim_queue_len); - window.chain(self.future_blocks.iter_mut()).take(cq_len) - } - - pub(crate) fn claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool { - gum::trace!( - target: LOG_TARGET, - ?para_id, - ?relay_parent, - "claim_at" - ); - self.find_a_claim(relay_parent, para_id, true) - } - - pub(crate) fn can_claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool { - gum::trace!( - target: LOG_TARGET, - ?para_id, - ?relay_parent, - "can_claim_at" - ); - - self.find_a_claim(relay_parent, para_id, false) - } - - // Returns `true` if there is a claim within `relay_parent`'s view of the claim queue for - // `para_id`. If `claim_it` is set to `true` the slot is claimed. 
Otherwise the function just - // reports the availability of the slot. - fn find_a_claim(&mut self, relay_parent: &Hash, para_id: &ParaId, claim_it: bool) -> bool { - let window = self.get_window(relay_parent); - - for w in window { - gum::trace!( - target: LOG_TARGET, - ?para_id, - ?relay_parent, - claim_info=?w, - ?claim_it, - "Checking claim" - ); - - if !w.claimed && w.claim == Some(*para_id) { - w.claimed = claim_it; - return true - } - } - - false - } - - pub(crate) fn unclaimed_at(&mut self, relay_parent: &Hash) -> Vec { - let window = self.get_window(relay_parent); - - window.filter(|b| !b.claimed).filter_map(|b| b.claim).collect() - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn sane_initial_state() { - let mut state = ClaimQueueState::new(); - let relay_parent = Hash::from_low_u64_be(1); - let para_id = ParaId::new(1); - - assert!(!state.can_claim_at(&relay_parent, ¶_id)); - assert!(!state.claim_at(&relay_parent, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent), vec![]); - } - - #[test] - fn add_leaf_works() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id = ParaId::new(1); - let claim_queue = vec![para_id, para_id, para_id]; - - state.add_leaf(&relay_parent_a, &claim_queue); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: false, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } - ]) - ); - - // should be no op - state.add_leaf(&relay_parent_a, &claim_queue); - assert_eq!(state.block_state.len(), 1); - assert_eq!(state.future_blocks.len(), 2); - - // add another leaf - let relay_parent_b = 
Hash::from_low_u64_be(2); - state.add_leaf(&relay_parent_b, &claim_queue); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: false, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id), - claim_queue_len: 3, - claimed: false, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } - ]) - ); - - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); - } - - #[test] - fn claims_at_separate_relay_parents_work() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let relay_parent_b = Hash::from_low_u64_be(2); - let para_id = ParaId::new(1); - let claim_queue = vec![para_id, para_id, para_id]; - - state.add_leaf(&relay_parent_a, &claim_queue); - state.add_leaf(&relay_parent_b, &claim_queue); - - // add one claim for a - assert!(state.can_claim_at(&relay_parent_a, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); - assert!(state.claim_at(&relay_parent_a, ¶_id)); - - // and one for b - assert!(state.can_claim_at(&relay_parent_b, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); - assert!(state.claim_at(&relay_parent_b, ¶_id)); - - // a should have one claim since the one for b was claimed - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); - // and two more for b - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - }, - 
ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } - ]) - ); - } - - #[test] - fn claims_are_transferred_to_next_slot() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id = ParaId::new(1); - let claim_queue = vec![para_id, para_id, para_id]; - - state.add_leaf(&relay_parent_a, &claim_queue); - - // add two claims, 2nd should be transferred to a new leaf - assert!(state.can_claim_at(&relay_parent_a, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); - assert!(state.claim_at(&relay_parent_a, ¶_id)); - - assert!(state.can_claim_at(&relay_parent_a, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id]); - assert!(state.claim_at(&relay_parent_a, ¶_id)); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } - ]) - ); - - // one more - assert!(state.can_claim_at(&relay_parent_a, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); - assert!(state.claim_at(&relay_parent_a, ¶_id)); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true 
}, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } - ]) - ); - - // no more claims - assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - } - - #[test] - fn claims_are_transferred_to_new_leaves() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id = ParaId::new(1); - let claim_queue = vec![para_id, para_id, para_id]; - - state.add_leaf(&relay_parent_a, &claim_queue); - - for _ in 0..3 { - assert!(state.can_claim_at(&relay_parent_a, ¶_id)); - assert!(state.claim_at(&relay_parent_a, ¶_id)); - } - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } - ]) - ); - - // no more claims - assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); - - // new leaf - let relay_parent_b = Hash::from_low_u64_be(2); - state.add_leaf(&relay_parent_b, &claim_queue); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } - ]) - ); - - // still no claims for a - assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); - - // but can accept for b - assert!(state.can_claim_at(&relay_parent_b, ¶_id)); - assert!(state.claim_at(&relay_parent_b, 
¶_id)); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id), - claim_queue_len: 3, - claimed: true, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } - ]) - ); - } - - #[test] - fn two_paras() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue = vec![para_id_a, para_id_b, para_id_a]; - - state.add_leaf(&relay_parent_a, &claim_queue); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: false, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - } - ]) - ); - - assert!(state.claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b, para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: 
None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - } - ]) - ); - - assert!(state.claim_at(&relay_parent_a, ¶_id_a)); - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } - ]) - ); - - assert!(state.claim_at(&relay_parent_a, ¶_id_b)); - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } - ]) - ); - } - - #[test] - fn claim_queue_changes_unexpectedly() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; - - state.add_leaf(&relay_parent_a, &claim_queue_a); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert!(state.claim_at(&relay_parent_a, ¶_id_a)); - 
assert!(state.claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } - ]) - ); - - let relay_parent_b = Hash::from_low_u64_be(2); - let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, a, ...] - state.add_leaf(&relay_parent_b, &claim_queue_b); - - // because of the unexpected change in claim queue we lost the claim for paraB and have one - // unclaimed for paraA - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: false, - } - ]) - ); - assert_eq!( - state.future_blocks, - // since the 3rd slot of the claim queue at rp1 is equal to the second one in rp2, this - // claim still exists - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - } - ]) - ); - } - - #[test] - fn claim_queue_changes_unexpectedly_with_two_blocks() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue_a = vec![para_id_a, para_id_b, para_id_b]; - - state.add_leaf(&relay_parent_a, &claim_queue_a); - 
assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); - assert!(state.claim_at(&relay_parent_a, ¶_id_a)); - assert!(state.claim_at(&relay_parent_a, ¶_id_b)); - assert!(state.claim_at(&relay_parent_a, ¶_id_b)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, - ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true } - ]) - ); - - let relay_parent_b = Hash::from_low_u64_be(2); - let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, b, ...] - state.add_leaf(&relay_parent_b, &claim_queue_b); - - // because of the unexpected change in claim queue we lost both claims for paraB and have - // two unclaimed for paraA - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: true, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: false, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - } - ]) - ); - } - - #[test] - fn empty_claim_queue() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let claim_queue_a = vec![]; - - state.add_leaf(&relay_parent_a, &claim_queue_a); - assert_eq!(state.unclaimed_at(&relay_parent_a), 
vec![]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: None, - claim_queue_len: 0, - claimed: false, - },]) - ); - // no claim queue so we know nothing about future blocks - assert!(state.future_blocks.is_empty()); - - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - let relay_parent_b = Hash::from_low_u64_be(2); - let claim_queue_b = vec![para_id_a]; - state.add_leaf(&relay_parent_b, &claim_queue_b); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: None, - claim_queue_len: 0, - claimed: false, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false, - }, - ]) - ); - // claim queue with length 1 doesn't say anything about future blocks - assert!(state.future_blocks.is_empty()); - - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - assert!(state.can_claim_at(&relay_parent_b, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a]); - assert!(state.claim_at(&relay_parent_b, ¶_id_a)); - - let relay_parent_c = Hash::from_low_u64_be(3); - let claim_queue_c = vec![para_id_a, para_id_a]; - state.add_leaf(&relay_parent_c, &claim_queue_c); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: None, - claim_queue_len: 0, - claimed: false, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: true, - }, - ClaimInfo { - hash: Some(relay_parent_c), - claim: Some(para_id_a), - claim_queue_len: 2, - claimed: false, - }, - ]) - ); - // claim queue with length 2 fills only one future block - assert_eq!( - state.future_blocks, - 
VecDeque::from(vec![ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false, - },]) - ); - - assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); - assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); - - // already claimed - assert!(!state.can_claim_at(&relay_parent_b, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![]); - assert!(!state.claim_at(&relay_parent_b, ¶_id_a)); - - assert!(state.can_claim_at(&relay_parent_c, ¶_id_a)); - assert_eq!(state.unclaimed_at(&relay_parent_c), vec![para_id_a, para_id_a]); - } - - #[test] - fn claim_queue_becomes_shorter() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; - - state.add_leaf(&relay_parent_a, &claim_queue_a); - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: false, - },]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - } - ]) - ); - - let relay_parent_b = Hash::from_low_u64_be(2); - let claim_queue_b = vec![para_id_a, para_id_b]; // should be [b, a] - state.add_leaf(&relay_parent_b, &claim_queue_b); - - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a, para_id_b]); - // claims for `relay_parent_a` has changed. 
- assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a, para_id_b]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 3, - claimed: false, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_a), - claim_queue_len: 2, - claimed: false, - } - ]) - ); - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - },]) - ); - } - - #[test] - fn claim_queue_becomes_shorter_and_drops_future_claims() { - let mut state = ClaimQueueState::new(); - let relay_parent_a = Hash::from_low_u64_be(1); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue_a = vec![para_id_a, para_id_b, para_id_a, para_id_b]; - - state.add_leaf(&relay_parent_a, &claim_queue_a); - - assert_eq!( - state.unclaimed_at(&relay_parent_a), - vec![para_id_a, para_id_b, para_id_a, para_id_b] - ); - - // We start with claim queue len 4. 
- assert_eq!( - state.block_state, - VecDeque::from(vec![ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 4, - claimed: false, - },]) - ); - // we have got three future blocks - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ - ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - }, - ClaimInfo { - hash: None, - claim: Some(para_id_b), - claim_queue_len: 1, - claimed: false - } - ]) - ); - - // The next claim len is 2, so we loose one future block - let relay_parent_b = Hash::from_low_u64_be(2); - let para_id_a = ParaId::new(1); - let para_id_b = ParaId::new(2); - let claim_queue_b = vec![para_id_b, para_id_a]; - state.add_leaf(&relay_parent_b, &claim_queue_b); - - assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); - assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_b, para_id_a]); - - assert_eq!( - state.block_state, - VecDeque::from(vec![ - ClaimInfo { - hash: Some(relay_parent_a), - claim: Some(para_id_a), - claim_queue_len: 4, - claimed: false, - }, - ClaimInfo { - hash: Some(relay_parent_b), - claim: Some(para_id_b), - claim_queue_len: 2, - claimed: false, - } - ]) - ); - - assert_eq!( - state.future_blocks, - VecDeque::from(vec![ClaimInfo { - hash: None, - claim: Some(para_id_a), - claim_queue_len: 1, - claimed: false - },]) - ); - } -} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 625140a73966..cc0de1cb70f6 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -18,28 +18,16 @@ //! //! Usually a path of collations is as follows: //! 1. First, collation must be advertised by collator. -//! 2. 
The validator inspects the claim queue and decides if the collation should be fetched -//! based on the entries there. A parachain can't have more fetched collations than the -//! entries in the claim queue at a specific relay parent. When calculating this limit the -//! validator counts all advertisements within its view not just at the relay parent. -//! 3. If the advertisement was accepted, it's queued for fetch (per relay parent). -//! 4. Once it's requested, the collation is said to be pending fetch -//! (`CollationStatus::Fetching`). -//! 5. Pending fetch collation becomes pending validation -//! (`CollationStatus::WaitingOnValidation`) once received, we send it to backing for -//! validation. -//! 6. If it turns to be invalid or async backing allows seconding another candidate, carry on +//! 2. If the advertisement was accepted, it's queued for fetch (per relay parent). +//! 3. Once it's requested, the collation is said to be Pending. +//! 4. Pending collation becomes Fetched once received, we send it to backing for validation. +//! 5. If it turns to be invalid or async backing allows seconding another candidate, carry on //! with the next advertisement, otherwise we're done with this relay parent. //! -//! ┌───────────────────────────────────┐ -//! └─▶Waiting ─▶ Fetching ─▶ WaitingOnValidation - -use std::{ - collections::{BTreeMap, VecDeque}, - future::Future, - pin::Pin, - task::Poll, -}; +//! ┌──────────────────────────────────────────┐ +//! 
└─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated + +use std::{collections::VecDeque, future::Future, pin::Pin, task::Poll}; use futures::{future::BoxFuture, FutureExt}; use polkadot_node_network_protocol::{ @@ -48,7 +36,9 @@ use polkadot_node_network_protocol::{ PeerId, }; use polkadot_node_primitives::PoV; -use polkadot_node_subsystem_util::metrics::prometheus::prometheus::HistogramTimer; +use polkadot_node_subsystem_util::{ + metrics::prometheus::prometheus::HistogramTimer, runtime::ProspectiveParachainsMode, +}; use polkadot_primitives::{ vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, CollatorId, Hash, HeadData, Id as ParaId, PersistedValidationData, @@ -197,10 +187,12 @@ pub struct PendingCollationFetch { pub enum CollationStatus { /// We are waiting for a collation to be advertised to us. Waiting, - /// We are currently fetching a collation for the specified `ParaId`. - Fetching(ParaId), + /// We are currently fetching a collation. + Fetching, /// We are waiting that a collation is being validated. WaitingOnValidation, + /// We have seconded a collation. + Seconded, } impl Default for CollationStatus { @@ -210,22 +202,22 @@ impl Default for CollationStatus { } impl CollationStatus { - /// Downgrades to `Waiting` - pub fn back_to_waiting(&mut self) { - *self = Self::Waiting + /// Downgrades to `Waiting`, but only if `self != Seconded`. + fn back_to_waiting(&mut self, relay_parent_mode: ProspectiveParachainsMode) { + match self { + Self::Seconded => + if relay_parent_mode.is_enabled() { + // With async backing enabled it's allowed to + // second more candidates. + *self = Self::Waiting + }, + _ => *self = Self::Waiting, + } } } -/// The number of claims in the claim queue and seconded candidates count for a specific `ParaId`. -#[derive(Default, Debug)] -struct CandidatesStatePerPara { - /// How many collations have been seconded. - pub seconded_per_para: usize, - // Claims in the claim queue for the `ParaId`. 
- pub claims_per_para: usize, -} - /// Information about collations per relay parent. +#[derive(Default)] pub struct Collations { /// What is the current status in regards to a collation for this relay parent? pub status: CollationStatus, @@ -234,89 +226,75 @@ pub struct Collations { /// This is the currently last started fetch, which did not exceed `MAX_UNSHARED_DOWNLOAD_TIME` /// yet. pub fetching_from: Option<(CollatorId, Option)>, - /// Collation that were advertised to us, but we did not yet request or fetch. Grouped by - /// `ParaId`. - waiting_queue: BTreeMap>, - /// Number of seconded candidates and claims in the claim queue per `ParaId`. - candidates_state: BTreeMap, + /// Collation that were advertised to us, but we did not yet fetch. + pub waiting_queue: VecDeque<(PendingCollation, CollatorId)>, + /// How many collations have been seconded. + pub seconded_count: usize, } impl Collations { - pub(super) fn new(group_assignments: &Vec) -> Self { - let mut candidates_state = BTreeMap::::new(); - - for para_id in group_assignments { - candidates_state.entry(*para_id).or_default().claims_per_para += 1; - } - - Self { - status: Default::default(), - fetching_from: None, - waiting_queue: Default::default(), - candidates_state, - } - } - /// Note a seconded collation for a given para. - pub(super) fn note_seconded(&mut self, para_id: ParaId) { - self.candidates_state.entry(para_id).or_default().seconded_per_para += 1; - gum::trace!( - target: LOG_TARGET, - ?para_id, - new_count=self.candidates_state.entry(para_id).or_default().seconded_per_para, - "Note seconded." - ); - self.status.back_to_waiting(); + pub(super) fn note_seconded(&mut self) { + self.seconded_count += 1 } - /// Adds a new collation to the waiting queue for the relay parent. This function doesn't - /// perform any limits check. The caller should assure that the collation limit is respected. 
- pub(super) fn add_to_waiting_queue(&mut self, collation: (PendingCollation, CollatorId)) { - self.waiting_queue.entry(collation.0.para_id).or_default().push_back(collation); - } - - /// Picks a collation to fetch from the waiting queue. - /// When fetching collations we need to ensure that each parachain has got a fair core time - /// share depending on its assignments in the claim queue. This means that the number of - /// collations seconded per parachain should ideally be equal to the number of claims for the - /// particular parachain in the claim queue. + /// Returns the next collation to fetch from the `waiting_queue`. /// - /// To achieve this each seconded collation is mapped to an entry from the claim queue. The next - /// fetch is the first unfulfilled entry from the claim queue for which there is an - /// advertisement. + /// This will reset the status back to `Waiting` using [`CollationStatus::back_to_waiting`]. /// - /// `unfulfilled_claim_queue_entries` represents all claim queue entries which are still not - /// fulfilled. - pub(super) fn pick_a_collation_to_fetch( + /// Returns `Some(_)` if there is any collation to fetch, the `status` is not `Seconded` and + /// the passed in `finished_one` is the currently `waiting_collation`. + pub(super) fn get_next_collation_to_fetch( &mut self, - unfulfilled_claim_queue_entries: Vec, + finished_one: &(CollatorId, Option), + relay_parent_mode: ProspectiveParachainsMode, ) -> Option<(PendingCollation, CollatorId)> { - gum::trace!( - target: LOG_TARGET, - waiting_queue=?self.waiting_queue, - candidates_state=?self.candidates_state, - "Pick a collation to fetch." - ); - - for assignment in unfulfilled_claim_queue_entries { - // if there is an unfulfilled assignment - return it - if let Some(collation) = self - .waiting_queue - .get_mut(&assignment) - .and_then(|collations| collations.pop_front()) + // If finished one does not match waiting_collation, then we already dequeued another fetch + // to replace it. 
+ if let Some((collator_id, maybe_candidate_hash)) = self.fetching_from.as_ref() { + // If a candidate hash was saved previously, `finished_one` must include this too. + if collator_id != &finished_one.0 && + maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref()) { - return Some(collation) + gum::trace!( + target: LOG_TARGET, + waiting_collation = ?self.fetching_from, + ?finished_one, + "Not proceeding to the next collation - has already been done." + ); + return None } } - - None + self.status.back_to_waiting(relay_parent_mode); + + match self.status { + // We don't need to fetch any other collation when we already have seconded one. + CollationStatus::Seconded => None, + CollationStatus::Waiting => + if self.is_seconded_limit_reached(relay_parent_mode) { + None + } else { + self.waiting_queue.pop_front() + }, + CollationStatus::WaitingOnValidation | CollationStatus::Fetching => + unreachable!("We have reset the status above!"), + } } - pub(super) fn seconded_for_para(&self, para_id: &ParaId) -> usize { - self.candidates_state - .get(¶_id) - .map(|state| state.seconded_per_para) - .unwrap_or_default() + /// Checks the limit of seconded candidates. + pub(super) fn is_seconded_limit_reached( + &self, + relay_parent_mode: ProspectiveParachainsMode, + ) -> bool { + let seconded_limit = + if let ProspectiveParachainsMode::Enabled { max_candidate_depth, .. 
} = + relay_parent_mode + { + max_candidate_depth + 1 + } else { + 1 + }; + self.seconded_count >= seconded_limit } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 5f5effcde9a8..86358f503d04 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -49,25 +49,22 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, - request_async_backing_params, request_claim_queue, request_session_index_for_child, - runtime::{recv_runtime, request_node_features}, + request_claim_queue, request_session_index_for_child, + runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode}, }; use polkadot_primitives::{ node_features, - vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion}, - AsyncBackingParams, CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId, - OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion, CoreState}, + CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, SessionIndex, }; use crate::error::{Error, FetchError, Result, SecondingError}; use self::collation::BlockedCollationId; -use self::claim_queue_state::ClaimQueueState; - use super::{modify_reputation, tick_stream, LOG_TARGET}; -mod claim_queue_state; mod collation; mod metrics; @@ -166,19 +163,27 @@ impl PeerData { fn update_view( &mut self, implicit_view: &ImplicitView, - active_leaves: &HashMap, + active_leaves: &HashMap, + per_relay_parent: &HashMap, new_view: View, ) { let old_view = std::mem::replace(&mut self.view, new_view); if let PeerState::Collating(ref mut peer_state) = self.state { for 
removed in old_view.difference(&self.view) { - // Remove relay parent advertisements if it went out of our (implicit) view. - let keep = is_relay_parent_in_implicit_view( - removed, - implicit_view, - active_leaves, - peer_state.para_id, - ); + // Remove relay parent advertisements if it went out + // of our (implicit) view. + let keep = per_relay_parent + .get(removed) + .map(|s| { + is_relay_parent_in_implicit_view( + removed, + s.prospective_parachains_mode, + implicit_view, + active_leaves, + peer_state.para_id, + ) + }) + .unwrap_or(false); if !keep { peer_state.advertisements.remove(&removed); @@ -191,7 +196,8 @@ impl PeerData { fn prune_old_advertisements( &mut self, implicit_view: &ImplicitView, - active_leaves: &HashMap, + active_leaves: &HashMap, + per_relay_parent: &HashMap, ) { if let PeerState::Collating(ref mut peer_state) = self.state { peer_state.advertisements.retain(|hash, _| { @@ -199,30 +205,36 @@ impl PeerData { // - Relay parent is an active leaf // - It belongs to allowed ancestry under some leaf // Discard otherwise. - is_relay_parent_in_implicit_view( - hash, - implicit_view, - active_leaves, - peer_state.para_id, - ) + per_relay_parent.get(hash).map_or(false, |s| { + is_relay_parent_in_implicit_view( + hash, + s.prospective_parachains_mode, + implicit_view, + active_leaves, + peer_state.para_id, + ) + }) }); } } - /// Performs sanity check for an advertisement and notes it as advertised. + /// Note an advertisement by the collator. Returns `true` if the advertisement was imported + /// successfully. Fails if the advertisement is duplicate, out of view, or the peer has not + /// declared itself a collator. 
fn insert_advertisement( &mut self, on_relay_parent: Hash, + relay_parent_mode: ProspectiveParachainsMode, candidate_hash: Option, implicit_view: &ImplicitView, - active_leaves: &HashMap, - per_relay_parent: &PerRelayParent, + active_leaves: &HashMap, ) -> std::result::Result<(CollatorId, ParaId), InsertAdvertisementError> { match self.state { PeerState::Connected(_) => Err(InsertAdvertisementError::UndeclaredCollator), PeerState::Collating(ref mut state) => { if !is_relay_parent_in_implicit_view( &on_relay_parent, + relay_parent_mode, implicit_view, active_leaves, state.para_id, @@ -230,41 +242,53 @@ impl PeerData { return Err(InsertAdvertisementError::OutOfOurView) } - if let Some(candidate_hash) = candidate_hash { - if state - .advertisements - .get(&on_relay_parent) - .map_or(false, |candidates| candidates.contains(&candidate_hash)) - { - return Err(InsertAdvertisementError::Duplicate) - } - - let candidates = state.advertisements.entry(on_relay_parent).or_default(); - - // Current assignments is equal to the length of the claim queue. No honest - // collator should send that many advertisements. 
- if candidates.len() > per_relay_parent.assignment.current.len() { - return Err(InsertAdvertisementError::PeerLimitReached) - } - - candidates.insert(candidate_hash); - } else { - if self.version != CollationVersion::V1 { - gum::error!( - target: LOG_TARGET, - "Programming error, `candidate_hash` can not be `None` \ - for non `V1` networking.", - ); - } - - if state.advertisements.contains_key(&on_relay_parent) { - return Err(InsertAdvertisementError::Duplicate) - } - - state - .advertisements - .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); - }; + match (relay_parent_mode, candidate_hash) { + (ProspectiveParachainsMode::Disabled, candidate_hash) => { + if state.advertisements.contains_key(&on_relay_parent) { + return Err(InsertAdvertisementError::Duplicate) + } + state + .advertisements + .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); + }, + ( + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. }, + candidate_hash, + ) => { + if let Some(candidate_hash) = candidate_hash { + if state + .advertisements + .get(&on_relay_parent) + .map_or(false, |candidates| candidates.contains(&candidate_hash)) + { + return Err(InsertAdvertisementError::Duplicate) + } + + let candidates = + state.advertisements.entry(on_relay_parent).or_default(); + + if candidates.len() > max_candidate_depth { + return Err(InsertAdvertisementError::PeerLimitReached) + } + candidates.insert(candidate_hash); + } else { + if self.version != CollationVersion::V1 { + gum::error!( + target: LOG_TARGET, + "Programming error, `candidate_hash` can not be `None` \ + for non `V1` networking.", + ); + } + + if state.advertisements.contains_key(&on_relay_parent) { + return Err(InsertAdvertisementError::Duplicate) + } + state + .advertisements + .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); + }; + }, + } state.last_active = Instant::now(); Ok((state.collator_id.clone(), state.para_id)) @@ -345,6 +369,7 @@ struct GroupAssignments { } struct PerRelayParent { 
+ prospective_parachains_mode: ProspectiveParachainsMode, assignment: GroupAssignments, collations: Collations, v2_receipts: bool, @@ -365,10 +390,11 @@ struct State { /// ancestry of some active leaf, then it does support prospective parachains. implicit_view: ImplicitView, - /// All active leaves observed by us. This mapping works as a replacement for + /// All active leaves observed by us, including both that do and do not + /// support prospective parachains. This mapping works as a replacement for /// [`polkadot_node_network_protocol::View`] and can be dropped once the transition /// to asynchronous backing is done. - active_leaves: HashMap, + active_leaves: HashMap, /// State tracked per relay parent. per_relay_parent: HashMap, @@ -411,69 +437,23 @@ struct State { reputation: ReputationAggregator, } -impl State { - // Returns the number of seconded and pending collations for a specific `ParaId`. Pending - // collations are: - // 1. Collations being fetched from a collator. - // 2. Collations waiting for validation from backing subsystem. - // 3. Collations blocked from seconding due to parent not being known by backing subsystem. 
- fn seconded_and_pending_for_para(&self, relay_parent: &Hash, para_id: &ParaId) -> usize { - let seconded = self - .per_relay_parent - .get(relay_parent) - .map_or(0, |per_relay_parent| per_relay_parent.collations.seconded_for_para(para_id)); - - let pending_fetch = self.per_relay_parent.get(relay_parent).map_or(0, |rp_state| { - match rp_state.collations.status { - CollationStatus::Fetching(pending_para_id) if pending_para_id == *para_id => 1, - _ => 0, - } - }); - - let waiting_for_validation = self - .fetched_candidates - .keys() - .filter(|fc| fc.relay_parent == *relay_parent && fc.para_id == *para_id) - .count(); - - let blocked_from_seconding = - self.blocked_from_seconding.values().fold(0, |acc, blocked_collations| { - acc + blocked_collations - .iter() - .filter(|pc| { - pc.candidate_receipt.descriptor.para_id() == *para_id && - pc.candidate_receipt.descriptor.relay_parent() == *relay_parent - }) - .count() - }); - - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - ?para_id, - seconded, - pending_fetch, - waiting_for_validation, - blocked_from_seconding, - "Seconded and pending collations for para", - ); - - seconded + pending_fetch + waiting_for_validation + blocked_from_seconding - } -} - fn is_relay_parent_in_implicit_view( relay_parent: &Hash, + relay_parent_mode: ProspectiveParachainsMode, implicit_view: &ImplicitView, - active_leaves: &HashMap, + active_leaves: &HashMap, para_id: ParaId, ) -> bool { - active_leaves.iter().any(|(hash, _)| { - implicit_view - .known_allowed_relay_parents_under(hash, Some(para_id)) - .unwrap_or_default() - .contains(relay_parent) - }) + match relay_parent_mode { + ProspectiveParachainsMode::Disabled => active_leaves.contains_key(relay_parent), + ProspectiveParachainsMode::Enabled { .. 
} => active_leaves.iter().any(|(hash, mode)| { + mode.is_enabled() && + implicit_view + .known_allowed_relay_parents_under(hash, Some(para_id)) + .unwrap_or_default() + .contains(relay_parent) + }), + } } async fn construct_per_relay_parent( @@ -481,6 +461,7 @@ async fn construct_per_relay_parent( current_assignments: &mut HashMap, keystore: &KeystorePtr, relay_parent: Hash, + relay_parent_mode: ProspectiveParachainsMode, v2_receipts: bool, session_index: SessionIndex, ) -> Result> @@ -498,24 +479,39 @@ where .await .map_err(Error::CancelledValidatorGroups)??; + let cores = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender) + .await + .await + .map_err(Error::CancelledAvailabilityCores)??; + let core_now = if let Some(group) = polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then( |(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index), ) { - rotation_info.core_for_group(group, groups.len()) + rotation_info.core_for_group(group, cores.len()) } else { gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); return Ok(None) }; - let mut claim_queue = request_claim_queue(relay_parent, sender) + let claim_queue = request_claim_queue(relay_parent, sender) .await .await .map_err(Error::CancelledClaimQueue)??; - let assigned_paras = claim_queue.remove(&core_now).unwrap_or_else(|| VecDeque::new()); - - for para_id in assigned_paras.iter() { + let paras_now = cores + .get(core_now.0 as usize) + .and_then(|c| match (c, relay_parent_mode) { + (CoreState::Occupied(_), ProspectiveParachainsMode::Disabled) => None, + ( + CoreState::Occupied(_), + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, .. 
}, + ) => None, + _ => claim_queue.get(&core_now).cloned(), + }) + .unwrap_or_else(|| VecDeque::new()); + + for para_id in paras_now.iter() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -528,12 +524,10 @@ where } } - let assignment = GroupAssignments { current: assigned_paras.into_iter().collect() }; - let collations = Collations::new(&assignment.current); - Ok(Some(PerRelayParent { - assignment, - collations, + prospective_parachains_mode: relay_parent_mode, + assignment: GroupAssignments { current: paras_now.into_iter().collect() }, + collations: Collations::default(), v2_receipts, session_index, current_core: core_now, @@ -661,7 +655,12 @@ fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) { None => return, }; - peer_data.update_view(&state.implicit_view, &state.active_leaves, view); + peer_data.update_view( + &state.implicit_view, + &state.active_leaves, + &state.per_relay_parent, + view, + ); state.collation_requests_cancel_handles.retain(|pc, handle| { let keep = pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None); if !keep { @@ -694,6 +693,7 @@ async fn request_collation( .get_mut(&relay_parent) .ok_or(FetchError::RelayParentOutOfView)?; + // Relay parent mode is checked in `handle_advertisement`. 
let (requests, response_recv) = match (peer_protocol_version, prospective_candidate) { (CollationVersion::V1, _) => { let (req, response_recv) = OutgoingRequest::new( @@ -739,7 +739,7 @@ async fn request_collation( let maybe_candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); - per_relay_parent.collations.status = CollationStatus::Fetching(para_id); + per_relay_parent.collations.status = CollationStatus::Fetching; per_relay_parent .collations .fetching_from @@ -1050,62 +1050,6 @@ async fn second_unblocked_collations( } } -fn ensure_seconding_limit_is_respected( - relay_parent: &Hash, - para_id: ParaId, - state: &State, -) -> std::result::Result<(), AdvertisementError> { - let paths = state.implicit_view.paths_via_relay_parent(relay_parent); - - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - ?para_id, - ?paths, - "Checking seconding limit", - ); - - let mut has_claim_at_some_path = false; - for path in paths { - let mut cq_state = ClaimQueueState::new(); - for ancestor in &path { - let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, ¶_id); - cq_state.add_leaf( - &ancestor, - &state - .per_relay_parent - .get(ancestor) - .ok_or(AdvertisementError::RelayParentUnknown)? - .assignment - .current, - ); - for _ in 0..seconded_and_pending { - cq_state.claim_at(ancestor, ¶_id); - } - } - - if cq_state.can_claim_at(relay_parent, ¶_id) { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - ?para_id, - ?path, - "Seconding limit respected at path", - ); - has_claim_at_some_path = true; - break - } - } - - // If there is a place in the claim queue for the candidate at at least one path we will accept - // it. 
- if has_claim_at_some_path { - Ok(()) - } else { - Err(AdvertisementError::SecondedLimitReached) - } -} - async fn handle_advertisement( sender: &mut Sender, state: &mut State, @@ -1128,6 +1072,7 @@ where .get(&relay_parent) .ok_or(AdvertisementError::RelayParentUnknown)?; + let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let assignment = &per_relay_parent.assignment; let collator_para_id = @@ -1143,29 +1088,32 @@ where let (collator_id, para_id) = peer_data .insert_advertisement( relay_parent, + relay_parent_mode, candidate_hash, &state.implicit_view, &state.active_leaves, - &per_relay_parent, ) .map_err(AdvertisementError::Invalid)?; - ensure_seconding_limit_is_respected(&relay_parent, para_id, state)?; + if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { + return Err(AdvertisementError::SecondedLimitReached) + } if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate { // Check if backing subsystem allows to second this candidate. // // This is also only important when async backing or elastic scaling is enabled. - let can_second = can_second( - sender, - collator_para_id, - relay_parent, - candidate_hash, - parent_head_data_hash, - ) - .await; + let seconding_not_allowed = relay_parent_mode.is_enabled() && + !can_second( + sender, + collator_para_id, + relay_parent, + candidate_hash, + parent_head_data_hash, + ) + .await; - if !can_second { + if seconding_not_allowed { return Err(AdvertisementError::BlockedByBacking) } } @@ -1195,8 +1143,8 @@ where Ok(()) } -/// Enqueue collation for fetching. The advertisement is expected to be validated and the seconding -/// limit checked. +/// Enqueue collation for fetching. The advertisement is expected to be +/// validated. 
async fn enqueue_collation( sender: &mut Sender, state: &mut State, @@ -1231,6 +1179,7 @@ where return Ok(()) }, }; + let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let prospective_candidate = prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| ProspectiveCandidate { candidate_hash, @@ -1238,11 +1187,22 @@ where }); let collations = &mut per_relay_parent.collations; + if collations.is_seconded_limit_reached(relay_parent_mode) { + gum::trace!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + "Limit of seconded collations reached for valid advertisement", + ); + return Ok(()) + } + let pending_collation = PendingCollation::new(relay_parent, para_id, &peer_id, prospective_candidate); match collations.status { - CollationStatus::Fetching(_) | CollationStatus::WaitingOnValidation => { + CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, @@ -1250,13 +1210,26 @@ where ?relay_parent, "Added collation to the pending list" ); - collations.add_to_waiting_queue((pending_collation, collator_id)); + collations.waiting_queue.push_back((pending_collation, collator_id)); }, CollationStatus::Waiting => { - // We were waiting for a collation to be advertised to us (we were idle) so we can fetch - // the new collation immediately fetch_collation(sender, state, pending_collation, collator_id).await?; }, + CollationStatus::Seconded if relay_parent_mode.is_enabled() => { + // Limit is not reached, it's allowed to second another + // collation. 
+ fetch_collation(sender, state, pending_collation, collator_id).await?; + }, + CollationStatus::Seconded => { + gum::trace!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + ?relay_parent_mode, + "A collation has already been seconded", + ); + }, } Ok(()) @@ -1282,10 +1255,7 @@ where .await .await .map_err(Error::CancelledSessionIndex)??; - - let async_backing_params = - recv_runtime(request_async_backing_params(*leaf, sender).await).await?; - + let mode = prospective_parachains_mode(sender, *leaf).await?; let v2_receipts = request_node_features(*leaf, session_index, sender) .await? .unwrap_or_default() @@ -1298,6 +1268,7 @@ where &mut state.current_assignments, keystore, *leaf, + mode, v2_receipts, session_index, ) @@ -1306,53 +1277,53 @@ where continue }; - state.active_leaves.insert(*leaf, async_backing_params); + state.active_leaves.insert(*leaf, mode); state.per_relay_parent.insert(*leaf, per_relay_parent); - state - .implicit_view - .activate_leaf(sender, *leaf) - .await - .map_err(Error::ImplicitViewFetchError)?; - - // Order is always descending. - let allowed_ancestry = state - .implicit_view - .known_allowed_relay_parents_under(leaf, None) - .unwrap_or_default(); - for block_hash in allowed_ancestry { - if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { - // Safe to use the same v2 receipts config for the allowed relay parents as well - // as the same session index since they must be in the same session. - if let Some(per_relay_parent) = construct_per_relay_parent( - sender, - &mut state.current_assignments, - keystore, - *block_hash, - v2_receipts, - session_index, - ) - .await? - { - entry.insert(per_relay_parent); + if mode.is_enabled() { + state + .implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; + + // Order is always descending. 
+ let allowed_ancestry = state + .implicit_view + .known_allowed_relay_parents_under(leaf, None) + .unwrap_or_default(); + for block_hash in allowed_ancestry { + if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { + // Safe to use the same v2 receipts config for the allowed relay parents as well + // as the same session index since they must be in the same session. + if let Some(per_relay_parent) = construct_per_relay_parent( + sender, + &mut state.current_assignments, + keystore, + *block_hash, + mode, + v2_receipts, + session_index, + ) + .await? + { + entry.insert(per_relay_parent); + } } } } } - for (removed, _) in removed { - gum::trace!( - target: LOG_TARGET, - ?view, - ?removed, - "handle_our_view_change - removed", - ); - + for (removed, mode) in removed { state.active_leaves.remove(removed); // If the leaf is deactivated it still may stay in the view as a part // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. - let pruned = state.implicit_view.deactivate_leaf(*removed); + let pruned = if mode.is_enabled() { + state.implicit_view.deactivate_leaf(*removed) + } else { + vec![*removed] + }; for removed in pruned { if let Some(per_relay_parent) = state.per_relay_parent.remove(&removed) { @@ -1382,7 +1353,11 @@ where }); for (peer_id, peer_data) in state.peer_data.iter_mut() { - peer_data.prune_old_advertisements(&state.implicit_view, &state.active_leaves); + peer_data.prune_old_advertisements( + &state.implicit_view, + &state.active_leaves, + &state.per_relay_parent, + ); // Disconnect peers who are not relevant to our current or next para. 
// @@ -1487,6 +1462,9 @@ async fn process_msg( "DistributeCollation message is not expected on the validator side of the protocol", ); }, + ReportCollator(id) => { + report_collator(&mut state.reputation, ctx.sender(), &state.peer_data, id).await; + }, NetworkBridgeUpdate(event) => { if let Err(e) = handle_network_msg(ctx, state, keystore, event).await { gum::warn!( @@ -1515,9 +1493,8 @@ async fn process_msg( if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) { - let PendingCollation { - relay_parent, peer_id, prospective_candidate, para_id, .. - } = pending_collation; + let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = + pending_collation; note_good_collation( &mut state.reputation, ctx.sender(), @@ -1537,7 +1514,8 @@ async fn process_msg( } if let Some(rp_state) = state.per_relay_parent.get_mut(&parent) { - rp_state.collations.note_seconded(para_id); + rp_state.collations.status = CollationStatus::Seconded; + rp_state.collations.note_seconded(); } // See if we've unblocked other collations for seconding. 
@@ -1666,7 +1644,6 @@ async fn run_inner( disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await; }, resp = state.collation_requests.select_next_some() => { - let relay_parent = resp.0.pending_collation.relay_parent; let res = match handle_collation_fetch_response( &mut state, resp, @@ -1675,17 +1652,9 @@ async fn run_inner( ).await { Err(Some((peer_id, rep))) => { modify_reputation(&mut state.reputation, ctx.sender(), peer_id, rep).await; - // Reset the status for the relay parent - state.per_relay_parent.get_mut(&relay_parent).map(|rp| { - rp.collations.status.back_to_waiting(); - }); continue }, Err(None) => { - // Reset the status for the relay parent - state.per_relay_parent.get_mut(&relay_parent).map(|rp| { - rp.collations.status.back_to_waiting(); - }); continue }, Ok(res) => res @@ -1764,7 +1733,11 @@ async fn dequeue_next_collation_and_fetch( // The collator we tried to fetch from last, optionally which candidate. previous_fetch: (CollatorId, Option), ) { - while let Some((next, id)) = get_next_collation_to_fetch(&previous_fetch, relay_parent, state) { + while let Some((next, id)) = state.per_relay_parent.get_mut(&relay_parent).and_then(|state| { + state + .collations + .get_next_collation_to_fetch(&previous_fetch, state.prospective_parachains_mode) + }) { gum::debug!( target: LOG_TARGET, ?relay_parent, @@ -1873,7 +1846,9 @@ async fn kick_off_seconding( collation_event.collator_protocol_version, collation_event.pending_collation.prospective_candidate, ) { - (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. })) => { + (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. 
})) + if per_relay_parent.prospective_parachains_mode.is_enabled() => + { let pvd = request_prospective_validation_data( ctx.sender(), relay_parent, @@ -1885,7 +1860,8 @@ async fn kick_off_seconding( (pvd, maybe_parent_head_data, Some(parent_head_data_hash)) }, - (CollationVersion::V1, _) => { + // Support V2 collators without async backing enabled. + (CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => { let pvd = request_persisted_validation_data( ctx.sender(), candidate_receipt.descriptor().relay_parent(), @@ -2134,106 +2110,6 @@ async fn handle_collation_fetch_response( result } -// Returns the claim queue without fetched or pending advertisement. The resulting `Vec` keeps the -// order in the claim queue so the earlier an element is located in the `Vec` the higher its -// priority is. -fn unfulfilled_claim_queue_entries(relay_parent: &Hash, state: &State) -> Result> { - let relay_parent_state = state - .per_relay_parent - .get(relay_parent) - .ok_or(Error::RelayParentStateNotFound)?; - let scheduled_paras = relay_parent_state.assignment.current.iter().collect::>(); - let paths = state.implicit_view.paths_via_relay_parent(relay_parent); - - let mut claim_queue_states = Vec::new(); - for path in paths { - let mut cq_state = ClaimQueueState::new(); - for ancestor in &path { - cq_state.add_leaf( - &ancestor, - &state - .per_relay_parent - .get(&ancestor) - .ok_or(Error::RelayParentStateNotFound)? - .assignment - .current, - ); - - for para_id in &scheduled_paras { - let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, ¶_id); - for _ in 0..seconded_and_pending { - cq_state.claim_at(&ancestor, ¶_id); - } - } - } - claim_queue_states.push(cq_state); - } - - // From the claim queue state for each leaf we have to return a combined single one. Go for a - // simple solution and return the longest one. 
In theory we always prefer the earliest entries - // in the claim queue so there is a good chance that the longest path is the one with - // unsatisfied entries in the beginning. This is not guaranteed as we might have fetched 2nd or - // 3rd spot from the claim queue but it should be good enough. - let unfulfilled_entries = claim_queue_states - .iter_mut() - .map(|cq| cq.unclaimed_at(relay_parent)) - .max_by(|a, b| a.len().cmp(&b.len())) - .unwrap_or_default(); - - Ok(unfulfilled_entries) -} - -/// Returns the next collation to fetch from the `waiting_queue` and reset the status back to -/// `Waiting`. -fn get_next_collation_to_fetch( - finished_one: &(CollatorId, Option), - relay_parent: Hash, - state: &mut State, -) -> Option<(PendingCollation, CollatorId)> { - let unfulfilled_entries = match unfulfilled_claim_queue_entries(&relay_parent, &state) { - Ok(entries) => entries, - Err(err) => { - gum::error!( - target: LOG_TARGET, - ?relay_parent, - ?err, - "Failed to get unfulfilled claim queue entries" - ); - return None - }, - }; - let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { - Some(rp_state) => rp_state, - None => { - gum::error!( - target: LOG_TARGET, - ?relay_parent, - "Failed to get relay parent state" - ); - return None - }, - }; - - // If finished one does not match waiting_collation, then we already dequeued another fetch - // to replace it. - if let Some((collator_id, maybe_candidate_hash)) = rp_state.collations.fetching_from.as_ref() { - // If a candidate hash was saved previously, `finished_one` must include this too. - if collator_id != &finished_one.0 && - maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref()) - { - gum::trace!( - target: LOG_TARGET, - waiting_collation = ?rp_state.collations.fetching_from, - ?finished_one, - "Not proceeding to the next collation - has already been done." 
- ); - return None - } - } - rp_state.collations.status.back_to_waiting(); - rp_state.collations.pick_a_collation_to_fetch(unfulfilled_entries) -} - // Sanity check the candidate descriptor version. fn descriptor_version_sanity_check( descriptor: &CandidateDescriptorV2, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 5a2e135419dd..7bc61dd4ebec 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -28,24 +28,28 @@ use std::{ time::Duration, }; -use self::prospective_parachains::update_view; use polkadot_node_network_protocol::{ + our_view, peer_set::CollationVersion, request_response::{Requests, ResponseSender}, ObservedRole, }; use polkadot_node_primitives::{BlockData, PoV}; -use polkadot_node_subsystem::messages::{ - AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest, +use polkadot_node_subsystem::{ + errors::RuntimeApiError, + messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest}, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, AsyncBackingParams, - CollatorPair, CoreIndex, GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData, - ValidatorId, ValidatorIndex, + node_features, + vstaging::{CandidateReceiptV2 as CandidateReceipt, CoreState, OccupiedCore}, + CollatorPair, CoreIndex, GroupIndex, GroupRotationInfo, HeadData, NodeFeatures, + PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, +}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, }; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, 
dummy_hash}; mod prospective_parachains; @@ -53,6 +57,9 @@ const ACTIVITY_TIMEOUT: Duration = Duration::from_millis(500); const DECLARE_TIMEOUT: Duration = Duration::from_millis(25); const REPUTATION_CHANGE_TEST_INTERVAL: Duration = Duration::from_millis(10); +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; + fn dummy_pvd() -> PersistedValidationData { PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), @@ -70,17 +77,19 @@ struct TestState { validator_public: Vec, validator_groups: Vec>, group_rotation_info: GroupRotationInfo, + cores: Vec, claim_queue: BTreeMap>, - async_backing_params: AsyncBackingParams, node_features: NodeFeatures, session_index: SessionIndex, - // Used by `update_view` to keep track of latest requested ancestor - last_known_block: Option, } impl Default for TestState { fn default() -> Self { - let relay_parent = Hash::from_low_u64_be(0x05); + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + + let chain_ids = vec![chain_a, chain_b]; + let relay_parent = Hash::repeat_byte(0x05); let collators = iter::repeat(()).map(|_| CollatorPair::generate().0).take(5).collect(); let validators = vec![ @@ -101,103 +110,50 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 1, now: 0 }; + let cores = vec![ + CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }), + CoreState::Free, + CoreState::Occupied(OccupiedCore { + next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }), + occupied_since: 0, + time_out_at: 1, + next_up_on_time_out: None, + availability: Default::default(), + group_responsible: GroupIndex(0), + candidate_hash: Default::default(), + candidate_descriptor: { + let mut d = dummy_candidate_descriptor(dummy_hash()); + d.para_id = chain_ids[1]; + + d.into() + }, + }), + ]; + let mut claim_queue = BTreeMap::new(); - 
claim_queue.insert( - CoreIndex(0), - iter::repeat(ParaId::from(Self::CHAIN_IDS[0])) - .take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize) - .collect(), - ); + claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect()); claim_queue.insert(CoreIndex(1), VecDeque::new()); - claim_queue.insert( - CoreIndex(2), - iter::repeat(ParaId::from(Self::CHAIN_IDS[1])) - .take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize) - .collect(), - ); + claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect()); let mut node_features = NodeFeatures::EMPTY; node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); Self { - chain_ids: Self::CHAIN_IDS.map(|id| ParaId::from(id)).to_vec(), + chain_ids, relay_parent, collators, validator_public, validator_groups, group_rotation_info, + cores, claim_queue, - async_backing_params: Self::ASYNC_BACKING_PARAMS, node_features, session_index: 1, - last_known_block: None, } } } -impl TestState { - const CHAIN_IDS: [u32; 2] = [1, 2]; - const ASYNC_BACKING_PARAMS: AsyncBackingParams = - AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; - - fn with_shared_core() -> Self { - let mut state = Self::default(); - - let mut claim_queue = BTreeMap::new(); - claim_queue.insert( - CoreIndex(0), - VecDeque::from_iter( - [ - ParaId::from(Self::CHAIN_IDS[1]), - ParaId::from(Self::CHAIN_IDS[0]), - ParaId::from(Self::CHAIN_IDS[0]), - ] - .into_iter(), - ), - ); - state.validator_groups.truncate(1); - - assert!( - claim_queue.get(&CoreIndex(0)).unwrap().len() == - Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize - ); - - state.claim_queue = claim_queue; - - state - } - - fn with_one_scheduled_para() -> Self { - let mut state = Self::default(); - - let validator_groups = vec![vec![ValidatorIndex(0), ValidatorIndex(1)]]; - - let mut claim_queue = BTreeMap::new(); - 
claim_queue.insert( - CoreIndex(0), - VecDeque::from_iter( - [ - ParaId::from(Self::CHAIN_IDS[0]), - ParaId::from(Self::CHAIN_IDS[0]), - ParaId::from(Self::CHAIN_IDS[0]), - ] - .into_iter(), - ), - ); - - assert!( - claim_queue.get(&CoreIndex(0)).unwrap().len() == - Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize - ); - - state.validator_groups = validator_groups; - state.claim_queue = claim_queue; - - state - } -} - type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; @@ -290,6 +246,91 @@ async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal) .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); } +async fn respond_to_runtime_api_queries( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + hash: Hash, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::SessionIndexForChild(tx) + )) => { + assert_eq!(rp, hash); + tx.send(Ok(test_state.session_index)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::AsyncBackingParams(tx) + )) => { + assert_eq!(rp, hash); + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::NodeFeatures(_, tx) + )) => { + assert_eq!(rp, hash); + tx.send(Ok(test_state.node_features.clone())).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Validators(tx), + )) => { + let _ = tx.send(Ok(test_state.validator_public.clone())); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + 
RuntimeApiRequest::ValidatorGroups(tx), + )) => { + assert_eq!(rp, hash); + let _ = tx.send(Ok(( + test_state.validator_groups.clone(), + test_state.group_rotation_info.clone(), + ))); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + assert_eq!(rp, hash); + let _ = tx.send(Ok(test_state.cores.clone())); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + assert_eq!(rp, hash); + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); +} + /// Assert that the next message is a `CandidateBacking(Second())`. async fn assert_candidate_backing_second( virtual_overseer: &mut VirtualOverseer, @@ -465,6 +506,198 @@ async fn advertise_collation( .await; } +// As we receive a relevant advertisement act on it and issue a collation request. +#[test] +fn act_on_advertisement() { + let test_state = TestState::default(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, .. 
} = test_harness; + + let pair = CollatorPair::generate().0; + gum::trace!("activating"); + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair.clone(), + test_state.chain_ids[0], + CollationVersion::V1, + ) + .await; + + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; + + assert_fetch_collation_request( + &mut virtual_overseer, + test_state.relay_parent, + test_state.chain_ids[0], + None, + ) + .await; + + virtual_overseer + }); +} + +/// Tests that validator side works with v2 network protocol +/// before async backing is enabled. +#[test] +fn act_on_advertisement_v2() { + let test_state = TestState::default(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, .. 
} = test_harness; + + let pair = CollatorPair::generate().0; + gum::trace!("activating"); + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair.clone(), + test_state.chain_ids[0], + CollationVersion::V2, + ) + .await; + + let pov = PoV { block_data: BlockData(vec![]) }; + let mut candidate_a = + dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); + candidate_a.descriptor.para_id = test_state.chain_ids[0]; + candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + + let candidate_hash = candidate_a.hash(); + let parent_head_data_hash = Hash::zero(); + // v2 advertisement. + advertise_collation( + &mut virtual_overseer, + peer_b, + test_state.relay_parent, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + test_state.relay_parent, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + response_channel + .send(Ok(( + request_v1::CollationFetchingResponse::Collation( + candidate_a.clone().into(), + pov.clone(), + ) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + &mut virtual_overseer, + test_state.relay_parent, + test_state.chain_ids[0], + &pov, + // Async backing isn't enabled and thus it should do it the old way. + CollationVersion::V1, + ) + .await; + + virtual_overseer + }); +} + +// Test that other subsystems may modify collators' reputations. 
+#[test] +fn collator_reporting_works() { + let test_state = TestState::default(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + test_state.collators[0].clone(), + test_state.chain_ids[0], + CollationVersion::V1, + ) + .await; + + connect_and_declare_collator( + &mut virtual_overseer, + peer_c, + test_state.collators[1].clone(), + test_state.chain_ids[0], + CollationVersion::V1, + ) + .await; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::ReportCollator(test_state.collators[0].public()), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); + } + ); + + virtual_overseer + }); +} + // Test that we verify the signatures on `Declare` and `AdvertiseCollation` messages. #[test] fn collator_authentication_verification_works() { @@ -514,18 +747,31 @@ fn collator_authentication_verification_works() { }); } -/// Tests that on a V1 Advertisement a validator fetches only one collation at any moment of time -/// per relay parent and ignores other V1 advertisements once a candidate gets seconded. +/// Tests that a validator fetches only one collation at any moment of time +/// per relay parent and ignores other advertisements once a candidate gets +/// seconded. 
#[test] -fn fetch_one_collation_at_a_time_for_v1_advertisement() { - let mut test_state = TestState::default(); +fn fetch_one_collation_at_a_time() { + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let second = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0), (second, 1)]) - .await; + let second = Hash::random(); + + let our_view = our_view![test_state.relay_parent, second]; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view.clone(), + )), + ) + .await; + + // Iter over view since the order may change due to sorted invariant. + for hash in our_view.iter() { + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -548,8 +794,8 @@ fn fetch_one_collation_at_a_time_for_v1_advertisement() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -604,14 +850,26 @@ fn fetch_one_collation_at_a_time_for_v1_advertisement() { /// timeout and in case of an error. #[test] fn fetches_next_collation() { - let mut test_state = TestState::with_one_scheduled_para(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - let first = test_state.relay_parent; let second = Hash::random(); - update_view(&mut virtual_overseer, &mut test_state, vec![(first, 0), (second, 1)]).await; + + let our_view = our_view![test_state.relay_parent, second]; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view.clone(), + )), + ) + .await; + + for hash in our_view.iter() { + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -721,13 +979,21 @@ fn fetches_next_collation() { #[test] fn reject_connection_to_next_group() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; let peer_b = PeerId::random(); @@ -760,13 +1026,26 @@ fn reject_connection_to_next_group() { // invalid. #[test] fn fetch_next_collation_on_invalid_collation() { - let mut test_state = TestState::with_one_scheduled_para(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + let second = Hash::random(); + + let our_view = our_view![test_state.relay_parent, second]; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view.clone(), + )), + ) + .await; + + for hash in our_view.iter() { + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -789,12 +1068,12 @@ fn fetch_next_collation_on_invalid_collation() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, - relay_parent, + test_state.relay_parent, test_state.chain_ids[0], None, ) @@ -804,7 +1083,7 @@ fn fetch_next_collation_on_invalid_collation() { let mut candidate_a = dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; - candidate_a.descriptor.relay_parent = relay_parent; + candidate_a.descriptor.relay_parent = test_state.relay_parent; candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel .send(Ok(( @@ -819,7 +1098,7 @@ fn fetch_next_collation_on_invalid_collation() { let receipt = assert_candidate_backing_second( &mut virtual_overseer, - relay_parent, + test_state.relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -829,7 +1108,7 @@ fn fetch_next_collation_on_invalid_collation() { // Inform that the candidate was invalid. 
overseer_send( &mut virtual_overseer, - CollatorProtocolMessage::Invalid(relay_parent, receipt), + CollatorProtocolMessage::Invalid(test_state.relay_parent, receipt), ) .await; @@ -846,7 +1125,7 @@ fn fetch_next_collation_on_invalid_collation() { // We should see a request for another collation. assert_fetch_collation_request( &mut virtual_overseer, - relay_parent, + test_state.relay_parent, test_state.chain_ids[0], None, ) @@ -858,15 +1137,25 @@ fn fetch_next_collation_on_invalid_collation() { #[test] fn inactive_disconnected() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + let hash_a = test_state.relay_parent; + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![hash_a], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; let peer_b = PeerId::random(); @@ -878,11 +1167,11 @@ fn inactive_disconnected() { CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; assert_fetch_collation_request( &mut virtual_overseer, - relay_parent, + test_state.relay_parent, test_state.chain_ids[0], None, ) @@ -897,24 +1186,31 @@ fn inactive_disconnected() { #[test] fn activity_extends_life() { - let mut test_state = TestState::with_one_scheduled_para(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; let pair = CollatorPair::generate().0; - let hash_a = Hash::from_low_u64_be(12); - let hash_b = Hash::from_low_u64_be(11); - let hash_c = Hash::from_low_u64_be(10); + let hash_a = test_state.relay_parent; + let hash_b = Hash::repeat_byte(1); + let hash_c = Hash::repeat_byte(2); + + let our_view = our_view![hash_a, hash_b, hash_c]; - update_view( + overseer_send( &mut virtual_overseer, - &mut test_state, - vec![(hash_a, 0), (hash_b, 1), (hash_c, 2)], + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view.clone(), + )), ) .await; + for hash in our_view.iter() { + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, *hash).await; + } + let peer_b = PeerId::random(); connect_and_declare_collator( @@ -972,13 +1268,21 @@ fn activity_extends_life() { #[test] fn disconnect_if_no_declare() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; let peer_b = PeerId::random(); @@ -1001,15 +1305,25 @@ fn disconnect_if_no_declare() { #[test] fn disconnect_if_wrong_declare() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; + let pair = CollatorPair::generate().0; - let peer_b = PeerId::random(); - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); overseer_send( &mut virtual_overseer, @@ -1053,15 +1367,25 @@ fn disconnect_if_wrong_declare() { #[test] fn delay_reputation_change() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| false), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; + let pair = CollatorPair::generate().0; - let peer_b = PeerId::random(); - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); overseer_send( &mut virtual_overseer, @@ -1136,24 +1460,42 @@ fn view_change_clears_old_collators() { let pair = CollatorPair::generate().0; - let peer = PeerId::random(); - let relay_parent = test_state.relay_parent; - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, 
test_state.relay_parent) + .await; + + let peer_b = PeerId::random(); connect_and_declare_collator( &mut virtual_overseer, - peer, + peer_b, pair.clone(), test_state.chain_ids[0], CollationVersion::V1, ) .await; - test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); + let hash_b = Hash::repeat_byte(69); - update_view(&mut virtual_overseer, &mut test_state, vec![]).await; + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![hash_b], + )), + ) + .await; - assert_collator_disconnect(&mut virtual_overseer, peer).await; + test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); + respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, hash_b).await; + + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index fac63aeb2097..eda26e8539a1 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -21,11 +21,14 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2}, - BlockNumber, CandidateCommitments, Header, SigningContext, ValidatorId, + AsyncBackingParams, BlockNumber, CandidateCommitments, Header, SigningContext, ValidatorId, }; use polkadot_primitives_test_helpers::dummy_committed_candidate_receipt_v2; use rstest::rstest; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; + fn get_parent_hash(hash: Hash) -> Hash { 
Hash::from_low_u64_be(hash.to_low_u64_be() + 1) } @@ -45,8 +48,7 @@ async fn assert_construct_per_relay_parent( msg, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) - ) => { - assert_eq!(parent, hash); + ) if parent == hash => { tx.send(Ok(test_state.validator_public.clone())).unwrap(); } ); @@ -63,6 +65,15 @@ async fn assert_construct_per_relay_parent( } ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.cores.clone())).unwrap(); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -77,11 +88,12 @@ async fn assert_construct_per_relay_parent( /// Handle a view update. pub(super) async fn update_view( virtual_overseer: &mut VirtualOverseer, - test_state: &mut TestState, + test_state: &TestState, new_view: Vec<(Hash, u32)>, // Hash and block number. + activated: u8, // How many new heads does this update contain? 
) -> Option { - let last_block_from_view = new_view.last().map(|t| t.1); let new_view: HashMap = HashMap::from_iter(new_view); + let our_view = OurView::new(new_view.keys().map(|hash| *hash), 0); overseer_send( @@ -91,14 +103,9 @@ pub(super) async fn update_view( .await; let mut next_overseer_message = None; - for _ in 0..new_view.len() { - let msg = match next_overseer_message.take() { - Some(msg) => msg, - None => overseer_recv(virtual_overseer).await, - }; - + for _ in 0..activated { let (leaf_hash, leaf_number) = assert_matches!( - msg, + overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, RuntimeApiRequest::SessionIndexForChild(tx) @@ -114,7 +121,7 @@ pub(super) async fn update_view( _, RuntimeApiRequest::AsyncBackingParams(tx), )) => { - tx.send(Ok(test_state.async_backing_params)).unwrap(); + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } ); @@ -137,8 +144,7 @@ pub(super) async fn update_view( ) .await; - let min_number = - leaf_number.saturating_sub(test_state.async_backing_params.allowed_ancestry_len); + let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); let ancestry_len = leaf_number + 1 - min_number; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) @@ -151,10 +157,6 @@ pub(super) async fn update_view( { let mut ancestry_iter = ancestry_iter.clone(); while let Some((hash, number)) = ancestry_iter.next() { - if Some(number) == test_state.last_known_block { - break; - } - // May be `None` for the last element. let parent_hash = ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); @@ -202,9 +204,6 @@ pub(super) async fn update_view( // Skip the leaf. 
for (hash, number) in ancestry_iter.skip(1).take(requested_len.saturating_sub(1)) { - if Some(number) == test_state.last_known_block { - break; - } assert_construct_per_relay_parent( virtual_overseer, test_state, @@ -215,9 +214,6 @@ pub(super) async fn update_view( .await; } } - - test_state.last_known_block = last_block_from_view; - next_overseer_message } @@ -341,140 +337,9 @@ async fn assert_persisted_validation_data( } } -// Combines dummy candidate creation, advertisement and fetching in a single call -async fn submit_second_and_assert( - virtual_overseer: &mut VirtualOverseer, - keystore: KeystorePtr, - para_id: ParaId, - relay_parent: Hash, - collator: PeerId, - candidate_head_data: HeadData, -) { - let (candidate, commitments) = - create_dummy_candidate_and_commitments(para_id, candidate_head_data, relay_parent); - - let candidate_hash = candidate.hash(); - let parent_head_data_hash = Hash::zero(); - - assert_advertise_collation( - virtual_overseer, - collator, - relay_parent, - para_id, - (candidate_hash, parent_head_data_hash), - ) - .await; - - let response_channel = assert_fetch_collation_request( - virtual_overseer, - relay_parent, - para_id, - Some(candidate_hash), - ) - .await; - - let pov = PoV { block_data: BlockData(vec![1]) }; - - send_collation_and_assert_processing( - virtual_overseer, - keystore, - relay_parent, - para_id, - collator, - response_channel, - candidate, - commitments, - pov, - ) - .await; -} - -fn create_dummy_candidate_and_commitments( - para_id: ParaId, - candidate_head_data: HeadData, - relay_parent: Hash, -) -> (CandidateReceipt, CandidateCommitments) { - let mut candidate = dummy_candidate_receipt_bad_sig(relay_parent, Some(Default::default())); - candidate.descriptor.para_id = para_id; - candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); - let commitments = CandidateCommitments { - head_data: candidate_head_data, - horizontal_messages: Default::default(), - upward_messages: Default::default(), - 
new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }; - candidate.commitments_hash = commitments.hash(); - - (candidate.into(), commitments) -} - -async fn assert_advertise_collation( - virtual_overseer: &mut VirtualOverseer, - peer: PeerId, - relay_parent: Hash, - expected_para_id: ParaId, - candidate: (CandidateHash, Hash), -) { - advertise_collation(virtual_overseer, peer, relay_parent, Some(candidate)).await; - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidate.0); - assert_eq!(request.candidate_para_id, expected_para_id); - assert_eq!(request.parent_head_data_hash, candidate.1); - tx.send(true).expect("receiving side should be alive"); - } - ); -} - -async fn send_collation_and_assert_processing( - virtual_overseer: &mut VirtualOverseer, - keystore: KeystorePtr, - relay_parent: Hash, - expected_para_id: ParaId, - expected_peer_id: PeerId, - response_channel: ResponseSender, - candidate: CandidateReceipt, - commitments: CandidateCommitments, - pov: PoV, -) { - response_channel - .send(Ok(( - request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone()) - .encode(), - ProtocolName::from(""), - ))) - .expect("Sending response should succeed"); - - assert_candidate_backing_second( - virtual_overseer, - relay_parent, - expected_para_id, - &pov, - CollationVersion::V2, - ) - .await; - - let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; - - send_seconded_statement(virtual_overseer, keystore.clone(), &candidate).await; - - assert_collation_seconded( - virtual_overseer, - relay_parent, - expected_peer_id, - CollationVersion::V2, - ) - .await; -} - #[test] fn v1_advertisement_accepted_and_seconded() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| 
true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -484,7 +349,7 @@ fn v1_advertisement_accepted_and_seconded() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -512,7 +377,7 @@ fn v1_advertisement_accepted_and_seconded() { candidate.descriptor.para_id = test_state.chain_ids[0]; candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); let commitments = CandidateCommitments { - head_data: HeadData(vec![1u8]), + head_data: HeadData(vec![1 as u8]), horizontal_messages: Default::default(), upward_messages: Default::default(), new_validation_code: None, @@ -553,7 +418,7 @@ fn v1_advertisement_accepted_and_seconded() { #[test] fn v1_advertisement_rejected_on_non_active_leaf() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -563,7 +428,7 @@ fn v1_advertisement_rejected_on_non_active_leaf() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 5; - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -595,7 +460,7 @@ fn v1_advertisement_rejected_on_non_active_leaf() { #[test] fn accept_advertisements_from_implicit_view() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; @@ -613,7 +478,7 @@ fn accept_advertisements_from_implicit_view() { let head_d = get_parent_hash(head_c); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); let peer_b = PeerId::random(); @@ -698,26 +563,24 @@ fn accept_advertisements_from_implicit_view() { #[test] fn second_multiple_candidates_per_relay_parent() { - let mut test_state = TestState::with_one_scheduled_para(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; let pair = CollatorPair::generate().0; - let head_a = Hash::from_low_u64_be(130); - let head_a_num: u32 = 0; - + // Grandparent of head `a`. let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; - // Activated leaf is `a` and `b`.The collation will be based on `b`. - update_view( - &mut virtual_overseer, - &mut test_state, - vec![(head_a, head_a_num), (head_b, head_b_num)], - ) - .await; + // Grandparent of head `b`. + // Group rotation frequency is 1 by default, at `c` we're assigned + // to the first para. + let head_c = Hash::from_low_u64_be(130); + + // Activated leaf is `b`, but the collation will be based on `c`. 
+ update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -730,17 +593,80 @@ fn second_multiple_candidates_per_relay_parent() { ) .await; - // `allowed_ancestry_len` equals the size of the claim queue - for i in 0..test_state.async_backing_params.allowed_ancestry_len { - submit_second_and_assert( + for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { + let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); + candidate.descriptor.para_id = test_state.chain_ids[0]; + candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + let commitments = CandidateCommitments { + head_data: HeadData(vec![i as u8]), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + let candidate: CandidateReceipt = candidate.into(); + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + advertise_collation( &mut virtual_overseer, - keystore.clone(), - test_state.chain_ids[0], - head_a, peer_a, - HeadData(vec![i as u8]), + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + let pov = PoV { block_data: BlockData(vec![1]) }; + + response_channel + .send(Ok(( + 
request_v2::CollationFetchingResponse::Collation( + candidate.clone(), + pov.clone(), + ) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + &pov, + CollationVersion::V2, ) .await; + + let candidate = + CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await; + + assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2) + .await; } // No more advertisements can be made for this relay parent. @@ -748,14 +674,21 @@ fn second_multiple_candidates_per_relay_parent() { advertise_collation( &mut virtual_overseer, peer_a, - head_a, + head_c, Some((candidate_hash, Hash::zero())), ) .await; - // Rejected but not reported because reached the limit of advertisements for the para_id - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); + // Reported because reached the limit of advertisements per relay parent. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep)), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep.value, COST_UNEXPECTED_MESSAGE.cost_or_benefit()); + } + ); // By different peer too (not reported). 
let pair_b = CollatorPair::generate().0; @@ -774,7 +707,7 @@ fn second_multiple_candidates_per_relay_parent() { advertise_collation( &mut virtual_overseer, peer_b, - head_a, + head_c, Some((candidate_hash, Hash::zero())), ) .await; @@ -788,7 +721,7 @@ fn second_multiple_candidates_per_relay_parent() { #[test] fn fetched_collation_sanity_check() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -805,7 +738,7 @@ fn fetched_collation_sanity_check() { let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -899,7 +832,7 @@ fn fetched_collation_sanity_check() { #[test] fn sanity_check_invalid_parent_head_data() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -909,7 +842,7 @@ fn sanity_check_invalid_parent_head_data() { let head_c = Hash::from_low_u64_be(130); let head_c_num = 3; - update_view(&mut virtual_overseer, &mut test_state, vec![(head_c, head_c_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1).await; let peer_a = PeerId::random(); @@ -1019,7 +952,7 @@ fn sanity_check_invalid_parent_head_data() { #[test] fn advertisement_spam_protection() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; @@ -1032,7 +965,7 @@ fn advertisement_spam_protection() { let head_c = get_parent_hash(head_b); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); connect_and_declare_collator( @@ -1093,7 +1026,7 @@ fn advertisement_spam_protection() { #[case(true)] #[case(false)] fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { - let mut test_state = TestState::with_one_scheduled_para(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -1110,7 +1043,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -1411,7 +1344,7 @@ fn v2_descriptor(#[case] v2_feature_enabled: bool) { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -1509,7 +1442,7 @@ fn v2_descriptor(#[case] v2_feature_enabled: bool) { #[test] fn invalid_v2_descriptor() { - let mut test_state = TestState::default(); + let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; @@ -1519,7 +1452,7 @@ fn invalid_v2_descriptor() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); @@ -1612,868 +1545,3 @@ fn invalid_v2_descriptor() { virtual_overseer }); } - -#[test] -fn fair_collation_fetches() { - let mut test_state = TestState::with_shared_core(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let head_b = Hash::from_low_u64_be(128); - let head_b_num: u32 = 2; - - update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; - - let peer_a = PeerId::random(); - let pair_a = CollatorPair::generate().0; - - connect_and_declare_collator( - &mut virtual_overseer, - peer_a, - pair_a.clone(), - test_state.chain_ids[0], - CollationVersion::V2, - ) - .await; - - let peer_b = PeerId::random(); - let pair_b = CollatorPair::generate().0; - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair_b.clone(), - test_state.chain_ids[1], - CollationVersion::V2, - ) - .await; - - // `peer_a` sends two advertisements (its claim queue limit) - for i in 0..2u8 { - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - head_b, - peer_a, - HeadData(vec![i]), - ) - .await; - } - - // `peer_a` sends another advertisement and it is ignored - let candidate_hash = CandidateHash(Hash::repeat_byte(0xAA)); - advertise_collation( - &mut virtual_overseer, - peer_a, - head_b, - Some((candidate_hash, Hash::zero())), - ) - .await; - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // `peer_b` should still be able to advertise its collation - submit_second_and_assert( - &mut 
virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[1]), - head_b, - peer_b, - HeadData(vec![0u8]), - ) - .await; - - // And no more advertisements can be made for this relay parent. - - // verify for peer_a - let candidate_hash = CandidateHash(Hash::repeat_byte(0xBB)); - advertise_collation( - &mut virtual_overseer, - peer_a, - head_b, - Some((candidate_hash, Hash::zero())), - ) - .await; - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // verify for peer_b - let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC)); - advertise_collation( - &mut virtual_overseer, - peer_b, - head_b, - Some((candidate_hash, Hash::zero())), - ) - .await; - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - virtual_overseer - }); -} - -#[test] -fn collation_fetching_prefer_entries_earlier_in_claim_queue() { - let mut test_state = TestState::with_shared_core(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - let pair_b = CollatorPair::generate().0; - let collator_b = PeerId::random(); - let para_id_b = test_state.chain_ids[1]; - - let head = Hash::from_low_u64_be(128); - let head_num: u32 = 2; - - update_view(&mut virtual_overseer, &mut test_state, vec![(head, head_num)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_b, - pair_b.clone(), - para_id_b, - CollationVersion::V2, - ) - .await; - - let (candidate_a1, commitments_a1) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![0u8]), head); - let (candidate_b1, commitments_b1) = - 
create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![1u8]), head); - let (candidate_a2, commitments_a2) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), head); - let (candidate_a3, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), head); - let parent_head_data_a1 = HeadData(vec![0u8]); - let parent_head_data_b1 = HeadData(vec![1u8]); - let parent_head_data_a2 = HeadData(vec![2u8]); - let parent_head_data_a3 = HeadData(vec![3u8]); - - // advertise a collation for `para_id_a` but don't send the collation. This will be a - // pending fetch. - assert_advertise_collation( - &mut virtual_overseer, - collator_a, - head, - para_id_a, - (candidate_a1.hash(), parent_head_data_a1.hash()), - ) - .await; - - let response_channel_a1 = assert_fetch_collation_request( - &mut virtual_overseer, - head, - para_id_a, - Some(candidate_a1.hash()), - ) - .await; - - // advertise another collation for `para_id_a`. This one should be fetched last. - assert_advertise_collation( - &mut virtual_overseer, - collator_a, - head, - para_id_a, - (candidate_a2.hash(), parent_head_data_a2.hash()), - ) - .await; - - // There is a pending collation so nothing should be fetched - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // Advertise a collation for `para_id_b`. 
This should be fetched second - assert_advertise_collation( - &mut virtual_overseer, - collator_b, - head, - para_id_b, - (candidate_b1.hash(), parent_head_data_b1.hash()), - ) - .await; - - // Again - no fetch because of the pending collation - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - //Now send a response for the first fetch and examine the second fetch - send_collation_and_assert_processing( - &mut virtual_overseer, - keystore.clone(), - head, - para_id_a, - collator_a, - response_channel_a1, - candidate_a1, - commitments_a1, - PoV { block_data: BlockData(vec![1]) }, - ) - .await; - - // The next fetch should be for `para_id_b` - let response_channel_b = assert_fetch_collation_request( - &mut virtual_overseer, - head, - para_id_b, - Some(candidate_b1.hash()), - ) - .await; - - send_collation_and_assert_processing( - &mut virtual_overseer, - keystore.clone(), - head, - para_id_b, - collator_b, - response_channel_b, - candidate_b1, - commitments_b1, - PoV { block_data: BlockData(vec![2]) }, - ) - .await; - - // and the final one for `para_id_a` - let response_channel_a2 = assert_fetch_collation_request( - &mut virtual_overseer, - head, - para_id_a, - Some(candidate_a2.hash()), - ) - .await; - - // Advertise another collation for `para_id_a`. This should be rejected as there is no slot - // in the claim queue for it. One is fetched and one is pending. 
- advertise_collation( - &mut virtual_overseer, - collator_a, - head, - Some((candidate_a3.hash(), parent_head_data_a3.hash())), - ) - .await; - - // `CanSecond` shouldn't be sent as the advertisement should be ignored - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // Fetch the pending collation - send_collation_and_assert_processing( - &mut virtual_overseer, - keystore.clone(), - head, - para_id_a, - collator_a, - response_channel_a2, - candidate_a2, - commitments_a2, - PoV { block_data: BlockData(vec![3]) }, - ) - .await; - - virtual_overseer - }); -} - -#[test] -fn collation_fetching_considers_advertisements_from_the_whole_view() { - let mut test_state = TestState::with_shared_core(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - let pair_b = CollatorPair::generate().0; - let collator_b = PeerId::random(); - let para_id_b = test_state.chain_ids[1]; - - let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); - - assert_eq!( - *test_state.claim_queue.get(&CoreIndex(0)).unwrap(), - VecDeque::from([para_id_b, para_id_a, para_id_a]) - ); - - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_b, - pair_b.clone(), - para_id_b, - CollationVersion::V2, - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_2, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_b, - 
relay_parent_2, - collator_b, - HeadData(vec![1u8]), - ) - .await; - - let relay_parent_3 = Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1); - *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = - VecDeque::from([para_id_a, para_id_a, para_id_b]); - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_3, 3)]).await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_b, - relay_parent_3, - collator_b, - HeadData(vec![3u8]), - ) - .await; - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_3, - collator_a, - HeadData(vec![3u8]), - ) - .await; - - // At this point the claim queue is satisfied and any advertisement at `relay_parent_4` - // must be ignored - - let (candidate_a, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_3); - let parent_head_data_a = HeadData(vec![5u8]); - - advertise_collation( - &mut virtual_overseer, - collator_a, - relay_parent_3, - Some((candidate_a.hash(), parent_head_data_a.hash())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - let (candidate_b, _) = - create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_3); - let parent_head_data_b = HeadData(vec![6u8]); - - advertise_collation( - &mut virtual_overseer, - collator_b, - relay_parent_3, - Some((candidate_b.hash(), parent_head_data_b.hash())), - ) - .await; - - // `CanSecond` shouldn't be sent as the advertisement should be ignored - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // At `relay_parent_6` the advertisement for `para_id_b` falls out of the view so a new one - // can be accepted - let relay_parent_6 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 2); - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_6, 6)]).await; - - 
submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_6, - collator_a, - HeadData(vec![3u8]), - ) - .await; - - virtual_overseer - }); -} - -#[test] -fn collation_fetching_fairness_handles_old_claims() { - let mut test_state = TestState::with_shared_core(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - let pair_b = CollatorPair::generate().0; - let collator_b = PeerId::random(); - let para_id_b = test_state.chain_ids[1]; - - let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); - - *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = - VecDeque::from([para_id_a, para_id_b, para_id_a]); - - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_b, - pair_b.clone(), - para_id_b, - CollationVersion::V2, - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_2, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_b, - relay_parent_2, - collator_b, - HeadData(vec![1u8]), - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_2, - collator_a, - HeadData(vec![2u8]), - ) - .await; - - let relay_parent_3 = Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1); - - *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = - VecDeque::from([para_id_b, para_id_a, para_id_b]); - update_view(&mut virtual_overseer, &mut 
test_state, vec![(relay_parent_3, 3)]).await; - - // nothing is advertised here - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - let relay_parent_4 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 1); - - *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = - VecDeque::from([para_id_a, para_id_b, para_id_a]); - update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_4, 4)]).await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_b, - relay_parent_4, - collator_b, - HeadData(vec![3u8]), - ) - .await; - - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - para_id_a, - relay_parent_4, - collator_a, - HeadData(vec![4u8]), - ) - .await; - - // At this point the claim queue is satisfied and any advertisement at `relay_parent_4` - // must be ignored - - // Advertisement for `para_id_a` at `relay_parent_4` which must be ignored - let (candidate_a, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_4); - let parent_head_data_a = HeadData(vec![5u8]); - - advertise_collation( - &mut virtual_overseer, - collator_a, - relay_parent_4, - Some((candidate_a.hash(), parent_head_data_a.hash())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // Advertisement for `para_id_b` at `relay_parent_4` which must be ignored - let (candidate_b, _) = - create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_4); - let parent_head_data_b = HeadData(vec![6u8]); - - advertise_collation( - &mut virtual_overseer, - collator_b, - relay_parent_4, - Some((candidate_b.hash(), parent_head_data_b.hash())), - ) - .await; - - // `CanSecond` shouldn't be sent as the advertisement should be ignored - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - virtual_overseer - }); -} - 
-#[test] -fn claims_below_are_counted_correctly() { - let mut test_state = TestState::with_one_scheduled_para(); - - // Shorten the claim queue to make the test smaller - let mut claim_queue = BTreeMap::new(); - claim_queue.insert( - CoreIndex(0), - VecDeque::from_iter( - [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] - .into_iter(), - ), - ); - test_state.claim_queue = claim_queue; - test_state.async_backing_params.max_candidate_depth = 3; - test_state.async_backing_params.allowed_ancestry_len = 2; - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); - let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); - let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - // A collation at hash_a claims the spot at hash_a - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_a, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - // Another collation at hash_a claims the spot at hash_b - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_a, - collator_a, - HeadData(vec![1u8]), - ) - .await; - - // Collation at hash_c claims its own spot - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_c, - collator_a, - HeadData(vec![2u8]), - ) - .await; - - // Collation at hash_b should be ignored because the 
claim queue is satisfied - let (ignored_candidate, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); - - advertise_collation( - &mut virtual_overseer, - collator_a, - hash_b, - Some((ignored_candidate.hash(), Hash::random())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - virtual_overseer - }); -} - -#[test] -fn claims_above_are_counted_correctly() { - let mut test_state = TestState::with_one_scheduled_para(); - - // Shorten the claim queue to make the test smaller - let mut claim_queue = BTreeMap::new(); - claim_queue.insert( - CoreIndex(0), - VecDeque::from_iter( - [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] - .into_iter(), - ), - ); - test_state.claim_queue = claim_queue; - test_state.async_backing_params.max_candidate_depth = 3; - test_state.async_backing_params.allowed_ancestry_len = 2; - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 - let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 - let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - // A collation at hash_b claims the spot at hash_b - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_b, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - // Another collation at hash_b claims the spot at hash_c - 
submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_b, - collator_a, - HeadData(vec![1u8]), - ) - .await; - - // Collation at hash_a claims its own spot - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_a, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - // Another Collation at hash_a should be ignored because the claim queue is satisfied - let (ignored_candidate, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), hash_a); - - advertise_collation( - &mut virtual_overseer, - collator_a, - hash_a, - Some((ignored_candidate.hash(), Hash::random())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // Same for hash_b - let (ignored_candidate, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); - - advertise_collation( - &mut virtual_overseer, - collator_a, - hash_b, - Some((ignored_candidate.hash(), Hash::random())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - virtual_overseer - }); -} - -#[test] -fn claim_fills_last_free_slot() { - let mut test_state = TestState::with_one_scheduled_para(); - - // Shorten the claim queue to make the test smaller - let mut claim_queue = BTreeMap::new(); - claim_queue.insert( - CoreIndex(0), - VecDeque::from_iter( - [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] - .into_iter(), - ), - ); - test_state.claim_queue = claim_queue; - test_state.async_backing_params.max_candidate_depth = 3; - test_state.async_backing_params.allowed_ancestry_len = 2; - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, keystore } = test_harness; - - let hash_a = 
Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 - let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 - let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 - - let pair_a = CollatorPair::generate().0; - let collator_a = PeerId::random(); - let para_id_a = test_state.chain_ids[0]; - - update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; - - connect_and_declare_collator( - &mut virtual_overseer, - collator_a, - pair_a.clone(), - para_id_a, - CollationVersion::V2, - ) - .await; - - // A collation at hash_a claims its spot - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_a, - collator_a, - HeadData(vec![0u8]), - ) - .await; - - // Collation at hash_b claims its own spot - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_b, - collator_a, - HeadData(vec![3u8]), - ) - .await; - - // Collation at hash_c claims its own spot - submit_second_and_assert( - &mut virtual_overseer, - keystore.clone(), - ParaId::from(test_state.chain_ids[0]), - hash_c, - collator_a, - HeadData(vec![2u8]), - ) - .await; - - // Another Collation at hash_a should be ignored because the claim queue is satisfied - let (ignored_candidate, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_a); - - advertise_collation( - &mut virtual_overseer, - collator_a, - hash_a, - Some((ignored_candidate.hash(), Hash::random())), - ) - .await; - - test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - // Same for hash_b - let (ignored_candidate, _) = - create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![4u8]), hash_b); - - advertise_collation( - &mut virtual_overseer, - collator_a, - hash_b, - Some((ignored_candidate.hash(), Hash::random())), - ) - .await; - - 
test_helpers::Yield::new().await; - assert_matches!(virtual_overseer.recv().now_or_never(), None); - - virtual_overseer - }); -} diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 079a37ca0aff..b4dcafe09eb6 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -5,39 +5,37 @@ description = "Polkadot Dispute Distribution subsystem, which ensures all concer authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["std"], workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } -fatality = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -indexmap = { workspace = true } +derive_more = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -schnellru = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = 
true, default-features = true } thiserror = { workspace = true } +fatality = { workspace = true } +schnellru = { workspace = true } +indexmap = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } async-channel = { workspace = true } async-trait = { workspace = true } -futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -sc-keystore = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 1ba556fc46b0..c8c19e5de070 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -5,19 +5,17 @@ description = "Polkadot Gossip Support subsystem. 
Responsible for keeping track authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -sc-network = { workspace = true, default-features = true } -sc-network-common = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } @@ -26,15 +24,15 @@ polkadot-primitives = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -gum = { workspace = true, default-features = true } rand = { workspace = true } rand_chacha = { workspace = true } +gum = { workspace = true, default-features = true } [dev-dependencies] -sp-authority-discovery = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 83a24959f60a..3d51d3c0a565 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true 
edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -14,22 +12,22 @@ workspace = true [dependencies] async-channel = { workspace = true } async-trait = { workspace = true } -bitvec = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -derive_more = { workspace = true, default-features = true } -fatality = { workspace = true } -futures = { workspace = true } -gum = { workspace = true, default-features = true } hex = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -sc-authority-discovery = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } strum = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } thiserror = { workspace = true } +fatality = { workspace = true } +rand = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index f4c1a07ba3c2..4dd7d29fc25c 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ 
b/polkadot/node/network/protocol/src/grid_topology.rs @@ -575,22 +575,6 @@ impl RequiredRouting { _ => false, } } - - /// Combine two required routing sets into one that would cover both routing modes. - pub fn combine(self, other: Self) -> Self { - match (self, other) { - (RequiredRouting::All, _) | (_, RequiredRouting::All) => RequiredRouting::All, - (RequiredRouting::GridXY, _) | (_, RequiredRouting::GridXY) => RequiredRouting::GridXY, - (RequiredRouting::GridX, RequiredRouting::GridY) | - (RequiredRouting::GridY, RequiredRouting::GridX) => RequiredRouting::GridXY, - (RequiredRouting::GridX, RequiredRouting::GridX) => RequiredRouting::GridX, - (RequiredRouting::GridY, RequiredRouting::GridY) => RequiredRouting::GridY, - (RequiredRouting::None, RequiredRouting::PendingTopology) | - (RequiredRouting::PendingTopology, RequiredRouting::None) => RequiredRouting::PendingTopology, - (RequiredRouting::None, _) | (RequiredRouting::PendingTopology, _) => other, - (_, RequiredRouting::None) | (_, RequiredRouting::PendingTopology) => self, - } - } } #[cfg(test)] @@ -603,50 +587,6 @@ mod tests { rand_chacha::ChaCha12Rng::seed_from_u64(12345) } - #[test] - fn test_required_routing_combine() { - assert_eq!(RequiredRouting::All.combine(RequiredRouting::None), RequiredRouting::All); - assert_eq!(RequiredRouting::All.combine(RequiredRouting::GridXY), RequiredRouting::All); - assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::All), RequiredRouting::All); - assert_eq!(RequiredRouting::None.combine(RequiredRouting::All), RequiredRouting::All); - assert_eq!(RequiredRouting::None.combine(RequiredRouting::None), RequiredRouting::None); - assert_eq!( - RequiredRouting::PendingTopology.combine(RequiredRouting::GridX), - RequiredRouting::GridX - ); - - assert_eq!( - RequiredRouting::GridX.combine(RequiredRouting::PendingTopology), - RequiredRouting::GridX - ); - assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridY), RequiredRouting::GridXY); - 
assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridX), RequiredRouting::GridXY); - assert_eq!( - RequiredRouting::GridXY.combine(RequiredRouting::GridXY), - RequiredRouting::GridXY - ); - assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridX), RequiredRouting::GridX); - assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridY), RequiredRouting::GridY); - - assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridY), RequiredRouting::GridY); - assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridX), RequiredRouting::GridX); - assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridXY), RequiredRouting::GridXY); - - assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::None), RequiredRouting::GridY); - assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::None), RequiredRouting::GridX); - assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::None), RequiredRouting::GridXY); - - assert_eq!( - RequiredRouting::PendingTopology.combine(RequiredRouting::None), - RequiredRouting::PendingTopology - ); - - assert_eq!( - RequiredRouting::None.combine(RequiredRouting::PendingTopology), - RequiredRouting::PendingTopology - ); - } - #[test] fn test_random_routing_sample() { // This test is fragile as it relies on a specific ChaCha12Rng diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 8bd058b8c849..de07937ffb0a 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -5,48 +5,46 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -arrayvec = { workspace = true } -bitvec = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -fatality = { workspace = true } futures = { 
workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -indexmap = { workspace = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-staking = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +arrayvec = { workspace = true } +indexmap = { workspace = true } +codec = { features = ["derive"], workspace = true } thiserror = { workspace = true } +fatality = { workspace = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } async-channel = { workspace = true } -futures-timer = { workspace = true } +assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives = { workspace = true, features = ["test"] } -polkadot-primitives-test-helpers = { workspace = true } -polkadot-subsystem-bench = { workspace = true } -rand_chacha = { workspace = true, default-features = true } -rstest = { workspace = true } -sc-keystore = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-core = { workspace = 
true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } +polkadot-primitives-test-helpers = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +polkadot-subsystem-bench = { workspace = true } +rstest = { workspace = true } [[bench]] name = "statement-distribution-regression-bench" diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index fd7f1e039247..2253a5ae0c66 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -5,37 +5,35 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -async-trait = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -gum = { workspace = true, default-features = true } -orchestra = { features = ["futures_channel"], workspace = true } parking_lot = { workspace = true, default-features = true } -polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-metrics = { 
workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +gum = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +async-trait = { workspace = true } tikv-jemalloc-ctl = { optional = true, workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -femme = { workspace = true } -futures = { features = ["thread-pool"], workspace = true } metered = { features = ["futures_channel"], workspace = true } -polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +femme = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index d138b77dea8f..7185205f905b 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -5,31 +5,29 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -bitvec = { features = ["alloc"], workspace = true } bounded-vec = { workspace = true } -codec = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } -polkadot-parachain-primitives = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } -sc-keystore = { workspace = true } 
-schnorrkel = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sp-core = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true } -sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +schnorrkel = { workspace = true, default-features = true } thiserror = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } +sc-keystore = { workspace = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 1e5ce6489bc8..6985e86098b0 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.17.0"; +pub const NODE_VERSION: &'static str = "1.16.1"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 122040a9b207..6e8eade21a43 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -6,106 +6,104 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] # Substrate Client -mmr-gadget = { workspace = true, default-features = true } sc-authority-discovery = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-consensus = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } +mmr-gadget = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } sc-consensus-slots = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = 
true } -sc-offchain = { workspace = true, default-features = true } -sc-service = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } sc-sync-state-rpc = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } sc-sysinfo = { workspace = true, default-features = true } +sc-service = { workspace = true } sc-telemetry = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -sp-genesis-builder = { workspace = true, default-features = true } -sp-mmr-primitives = { workspace = true, default-features = true } # Substrate Primitives -pallet-transaction-payment = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-beefy = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = 
true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-session = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Substrate Pallets +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } frame-metadata-hash-extension = { optional = true, workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } # Substrate Other -frame-benchmarking = { workspace = true, default-features = true } -frame-benchmarking-cli = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } # External Crates async-trait = { workspace = true } -codec = { workspace = true, default-features = true } futures = { workspace = true } -gum = { workspace = true, default-features = true } is_executable = { workspace = true } -kvdb = { workspace = true } -kvdb-rocksdb = { optional = true, workspace = true } +gum = { workspace = true, default-features = true } log = { workspace = true, default-features = true } 
-parity-db = { optional = true, workspace = true } -parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } +kvdb = { workspace = true } +kvdb-rocksdb = { optional = true, workspace = true } +parity-db = { optional = true, workspace = true } +codec = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } # Polkadot polkadot-core-primitives = { workspace = true, default-features = true } polkadot-node-core-parachains-inherent = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-rpc = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } # Polkadot Runtime Constants rococo-runtime-constants = { optional = true, workspace = true, default-features = true } westend-runtime-constants = { optional = true, workspace = true, default-features = true } # Polkadot Runtimes -rococo-runtime = { optional 
= true, workspace = true } westend-runtime = { optional = true, workspace = true } +rococo-runtime = { optional = true, workspace = true } # Polkadot Subsystems polkadot-approval-distribution = { optional = true, workspace = true, default-features = true } @@ -137,11 +135,11 @@ xcm = { workspace = true, default-features = true } xcm-runtime-apis = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } +polkadot-test-client = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } -polkadot-test-client = { workspace = true } sp-tracing = { workspace = true } +assert_matches = { workspace = true } tempfile = { workspace = true } [features] @@ -210,7 +208,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-system/try-runtime", diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 227bc5253994..d2424474302a 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -1003,7 +1003,7 @@ pub fn new_full< }) }; - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -1383,6 +1383,8 @@ pub fn new_full< ); } + network_starter.start_network(); + Ok(NewFull { task_manager, client, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index e4ea6efeaac2..279b6ff80704 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -210,7 +210,7 @@ pub fn validator_overseer_builder( AuthorityDiscoveryService, >, ChainApiSubsystem, - DummySubsystem, + CollationGenerationSubsystem, 
CollatorProtocolSubsystem, ApprovalDistributionSubsystem, ApprovalVotingSubsystem, @@ -237,7 +237,6 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = Metrics::register(registry)?; - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -296,7 +295,7 @@ where )) .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(DummySubsystem) + .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ let side = match is_parachain_node { IsParachainNode::Collator(_) | IsParachainNode::FullNode => @@ -435,7 +434,7 @@ pub fn validator_with_parallel_overseer_builder( AuthorityDiscoveryService, >, ChainApiSubsystem, - DummySubsystem, + CollationGenerationSubsystem, CollatorProtocolSubsystem, DummySubsystem, DummySubsystem, @@ -520,7 +519,7 @@ where )) .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(DummySubsystem) + .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ let side = match is_parachain_node { IsParachainNode::Collator(_) | IsParachainNode::FullNode => diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index e288ee2b78d3..8633818e775d 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -21,79 +21,79 @@ doc = false [dependencies] -assert_matches = { workspace = true } -async-trait = { workspace = true } -bincode = { workspace = true } -clap = { features = ["derive"], workspace = true } -color-eyre = { workspace = 
true } -colored = { workspace = true } -futures = { workspace = true } -futures-timer = { workspace = true } -gum = { workspace = true, default-features = true } -hex = { workspace = true, default-features = true } +tikv-jemallocator = { features = ["profiling", "unprefixed_malloc_on_supported_platforms"], workspace = true, optional = true } jemalloc_pprof = { workspace = true, optional = true } -log = { workspace = true, default-features = true } -polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } -polkadot-availability-distribution = { workspace = true, default-features = true } -polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } -polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-core-av-store = { workspace = true, default-features = true } -polkadot-node-core-chain-api = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, features = ["test"] } -polkadot-service = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } 
+polkadot-availability-distribution = { workspace = true, default-features = true } polkadot-statement-distribution = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } +polkadot-node-core-av-store = { workspace = true, default-features = true } +polkadot-node-core-chain-api = { workspace = true, default-features = true } +polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } +color-eyre = { workspace = true } +polkadot-overseer = { workspace = true, default-features = true } +colored = { workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } -sha1 = { workspace = true } sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +bincode = { workspace = true } +sha1 = { workspace = true } +hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } sp-tracing = { workspace = true } -tikv-jemallocator = { features = ["profiling", "unprefixed_malloc_on_supported_platforms"], workspace = true, optional = true } +rand = { workspace = true, default-features = true } # `rand` only supports uniform distribution, we need normal distribution for latency. 
+rand_distr = { workspace = true } bitvec = { workspace = true, default-features = true } kvdb-memorydb = { workspace = true } -rand_distr = { workspace = true } -clap-num = { workspace = true } codec = { features = ["derive", "std"], workspace = true, default-features = true } -itertools = { workspace = true } -polkadot-node-metrics = { workspace = true, default-features = true } +tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } +clap-num = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -prometheus = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +itertools = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +prometheus = { workspace = true } serde = { workspace = true, default-features = true } -serde_json = { workspace = true } serde_yaml = { workspace = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } +serde_json = { workspace = true } -polkadot-approval-distribution = { workspace = true, default-features = true } polkadot-node-core-approval-voting = { workspace = true, default-features = true } 
polkadot-node-core-approval-voting-parallel = { workspace = true, default-features = true } +polkadot-approval-distribution = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-timestamp = { workspace = true, default-features = true } schnorrkel = { workspace = true } # rand_core should match schnorrkel -orchestra = { features = ["futures_channel"], workspace = true } +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } paste = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } pyroscope = { workspace = true } pyroscope_pprofrs = { workspace = true } -rand_chacha = { workspace = true, default-features = true } -rand_core = { workspace = true } strum = { features = ["derive"], workspace = true, default-features = true } [features] diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index 4e660b15c1e2..d3229291673c 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -14,16 +14,16 @@ workspace = true async-trait = { workspace = true } futures = { workspace = true } parking_lot = { workspace = true, default-features = true } -polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } 
sc-utils = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 6c88a4474137..b5686ec96be1 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -5,32 +5,30 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -async-trait = { workspace = true } -bitvec = { features = ["alloc"], workspace = true } derive_more = { workspace = true, default-features = true } fatality = { workspace = true } futures = { workspace = true } -orchestra = { features = ["futures_channel"], workspace = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-statement-table = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } 
-sc-transaction-pool-api = { workspace = true, default-features = true } -smallvec = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } thiserror = { workspace = true } +async-trait = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index b541f9519219..28a3a1ab82ab 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -48,12 +48,12 @@ use polkadot_primitives::{ CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, }, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, - CandidateIndex, CoreIndex, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, SessionInfo, - SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + CandidateIndex, CollatorId, CoreIndex, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, HeadData, 
Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, + SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -250,6 +250,9 @@ pub enum CollatorProtocolMessage { /// The core index where the candidate should be backed. core_index: CoreIndex, }, + /// Report a collator as having provided an invalid collation. This should lead to disconnect + /// and blacklist of the collator. + ReportCollator(CollatorId), /// Get a network bridge update. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index 0e6ebf611997..d12daa572055 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -5,41 +5,39 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] async-trait = { workspace = true } -codec = { features = ["derive"], workspace = true } -derive_more = { workspace = true, default-features = true } -fatality = { workspace = true } futures = { workspace = true } futures-channel = { workspace = true } -gum = { workspace = true, default-features = true } itertools = { workspace = true } +codec = { features = ["derive"], workspace = true } parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } rand = { workspace = true, default-features = true } -schnellru = { workspace = true } thiserror = { workspace = true } +fatality = { workspace = true } +gum = { workspace = true, default-features = true } +derive_more = { workspace = 
true, default-features = true } +schnellru = { workspace = true } -metered = { features = ["futures_channel"], workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +metered = { features = ["futures_channel"], workspace = true } -sc-client-api = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } kvdb = { workspace = true } parity-db = { workspace = true } @@ -47,9 +45,9 @@ parity-db = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } futures = { features = ["thread-pool"], workspace = true } -kvdb-memorydb = { workspace = true } -kvdb-shared-tests = { workspace = true } log = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } +kvdb-shared-tests = { workspace = true } tempfile = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git 
a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index 67f5dad518e1..a805ef8165e5 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -20,15 +20,14 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage}, SubsystemSender, }; -use polkadot_primitives::{AsyncBackingParams, BlockNumber, Hash, Id as ParaId}; +use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use crate::{ inclusion_emulator::RelayChainBlockInfo, - request_async_backing_params, request_session_index_for_child, - runtime::{self, recv_runtime}, - LOG_TARGET, + request_session_index_for_child, + runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, }; // Always aim to retain 1 block before the active leaves. @@ -174,7 +173,13 @@ impl View { return Err(FetchError::AlreadyKnown) } - let res = self.fetch_fresh_leaf_and_insert_ancestry(leaf_hash, &mut *sender).await; + let res = fetch_fresh_leaf_and_insert_ancestry( + leaf_hash, + &mut self.block_info_storage, + &mut *sender, + self.collating_for, + ) + .await; match res { Ok(fetched) => { @@ -318,205 +323,6 @@ impl View { .as_ref() .map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number)) } - - /// Returns all paths from each leaf to the last block in state containing `relay_parent`. If no - /// paths exist the function will return an empty `Vec`. - pub fn paths_via_relay_parent(&self, relay_parent: &Hash) -> Vec> { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - leaves=?self.leaves, - block_info_storage=?self.block_info_storage, - "Finding paths via relay parent" - ); - - if self.leaves.is_empty() { - // No leaves so the view should be empty. Don't return any paths. 
- return vec![] - }; - - if !self.block_info_storage.contains_key(relay_parent) { - // `relay_parent` is not in the view - don't return any paths - return vec![] - } - - // Find all paths from each leaf to `relay_parent`. - let mut paths = Vec::new(); - for (leaf, _) in &self.leaves { - let mut path = Vec::new(); - let mut current_leaf = *leaf; - let mut visited = HashSet::new(); - let mut path_contains_target = false; - - // Start from the leaf and traverse all known blocks - loop { - if visited.contains(¤t_leaf) { - // There is a cycle - abandon this path - break - } - - current_leaf = match self.block_info_storage.get(¤t_leaf) { - Some(info) => { - // `current_leaf` is a known block - add it to the path and mark it as - // visited - path.push(current_leaf); - visited.insert(current_leaf); - - // `current_leaf` is the target `relay_parent`. Mark the path so that it's - // included in the result - if current_leaf == *relay_parent { - path_contains_target = true; - } - - // update `current_leaf` with the parent - info.parent_hash - }, - None => { - // path is complete - if path_contains_target { - // we want the path ordered from oldest to newest so reverse it - paths.push(path.into_iter().rev().collect()); - } - break - }, - }; - } - } - - paths - } - - async fn fetch_fresh_leaf_and_insert_ancestry( - &mut self, - leaf_hash: Hash, - sender: &mut Sender, - ) -> Result - where - Sender: SubsystemSender - + SubsystemSender - + SubsystemSender, - { - let leaf_header = { - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; - - match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - 
BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - } - }; - - // If the node is a collator, bypass prospective-parachains. We're only interested in the - // one paraid and the subsystem is not present. - let min_relay_parents = if let Some(para_id) = self.collating_for { - fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) - .await? - .map(|x| vec![(para_id, x)]) - .unwrap_or_default() - } else { - fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? - }; - - let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; - - let ancestry = if leaf_header.number > 0 { - let mut next_ancestor_number = leaf_header.number - 1; - let mut next_ancestor_hash = leaf_header.parent_hash; - - let mut ancestry = Vec::with_capacity(expected_ancestry_len); - ancestry.push(leaf_hash); - - // Ensure all ancestors up to and including `min_min` are in the - // block storage. When views advance incrementally, everything - // should already be present. - while next_ancestor_number >= min_min { - let parent_hash = if let Some(info) = - self.block_info_storage.get(&next_ancestor_hash) - { - info.parent_hash - } else { - // load the header and insert into block storage. 
- let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; - - let header = match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - }; - - self.block_info_storage.insert( - next_ancestor_hash, - BlockInfo { - block_number: next_ancestor_number, - parent_hash: header.parent_hash, - maybe_allowed_relay_parents: None, - }, - ); - - header.parent_hash - }; - - ancestry.push(next_ancestor_hash); - if next_ancestor_number == 0 { - break - } - - next_ancestor_number -= 1; - next_ancestor_hash = parent_hash; - } - - ancestry - } else { - vec![leaf_hash] - }; - - let fetched_ancestry = - FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; - - let allowed_relay_parents = AllowedRelayParents { - minimum_relay_parents: min_relay_parents.into_iter().collect(), - allowed_relay_parents_contiguous: ancestry, - }; - - let leaf_block_info = BlockInfo { - parent_hash: leaf_header.parent_hash, - block_number: leaf_header.number, - maybe_allowed_relay_parents: Some(allowed_relay_parents), - }; - - self.block_info_storage.insert(leaf_hash, leaf_block_info); - - Ok(fetched_ancestry) - } } /// Errors when fetching a leaf and associated ancestry. @@ -590,8 +396,13 @@ where + SubsystemSender + SubsystemSender, { - let AsyncBackingParams { allowed_ancestry_len, .. } = - recv_runtime(request_async_backing_params(leaf_hash, sender).await).await?; + let Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. 
}) = + prospective_parachains_mode(sender, leaf_hash).await + else { + // This should never happen, leaves that don't have prospective parachains mode enabled + // should not use implicit view. + return Ok(None) + }; // Fetch the session of the leaf. We must make sure that we stop at the ancestor which has a // different session index. @@ -605,7 +416,7 @@ where sender .send_message(ChainApiMessage::Ancestors { hash: leaf_hash, - k: allowed_ancestry_len as usize, + k: allowed_ancestry_len, response_channel: tx, }) .await; @@ -631,6 +442,137 @@ where Ok(Some(min)) } +async fn fetch_fresh_leaf_and_insert_ancestry( + leaf_hash: Hash, + block_info_storage: &mut HashMap, + sender: &mut Sender, + collating_for: Option, +) -> Result +where + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, +{ + let leaf_header = { + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; + + match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + } + }; + + // If the node is a collator, bypass prospective-parachains. We're only interested in the one + // paraid and the subsystem is not present. + let min_relay_parents = if let Some(para_id) = collating_for { + fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) + .await? + .map(|x| vec![(para_id, x)]) + .unwrap_or_default() + } else { + fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? 
+ }; + + let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; + + let ancestry = if leaf_header.number > 0 { + let mut next_ancestor_number = leaf_header.number - 1; + let mut next_ancestor_hash = leaf_header.parent_hash; + + let mut ancestry = Vec::with_capacity(expected_ancestry_len); + ancestry.push(leaf_hash); + + // Ensure all ancestors up to and including `min_min` are in the + // block storage. When views advance incrementally, everything + // should already be present. + while next_ancestor_number >= min_min { + let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) { + info.parent_hash + } else { + // load the header and insert into block storage. + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; + + let header = match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + }; + + block_info_storage.insert( + next_ancestor_hash, + BlockInfo { + block_number: next_ancestor_number, + parent_hash: header.parent_hash, + maybe_allowed_relay_parents: None, + }, + ); + + header.parent_hash + }; + + ancestry.push(next_ancestor_hash); + if next_ancestor_number == 0 { + break + } + + next_ancestor_number -= 1; + next_ancestor_hash = parent_hash; + } + + ancestry + } else { + vec![leaf_hash] + }; + + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; + + let allowed_relay_parents = 
AllowedRelayParents { + minimum_relay_parents: min_relay_parents.into_iter().collect(), + allowed_relay_parents_contiguous: ancestry, + }; + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + }; + + block_info_storage.insert(leaf_hash, leaf_block_info); + + Ok(fetched_ancestry) +} + #[cfg(test)] mod tests { use super::*; @@ -861,23 +803,6 @@ mod tests { assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..(PARA_A_MIN_PARENT - 1) as usize])); assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)), Some(&expected_ancestry[..])); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); - - assert_eq!(view.leaves.len(), 1); - assert!(view.leaves.contains_key(leaf)); - assert!(view.paths_via_relay_parent(&CHAIN_B[0]).is_empty()); - assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); - assert_eq!( - view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), - vec![CHAIN_B[min_min_idx..].to_vec()] - ); - assert_eq!( - view.paths_via_relay_parent(&CHAIN_B[min_min_idx + 1]), - vec![CHAIN_B[min_min_idx..].to_vec()] - ); - assert_eq!( - view.paths_via_relay_parent(&leaf), - vec![CHAIN_B[min_min_idx..].to_vec()] - ); } ); @@ -998,12 +923,6 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); - - assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); - assert_eq!( - view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), - vec![CHAIN_B[min_min_idx..].to_vec()] - ); } ); @@ -1072,12 +991,6 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); - - 
assert!(view.paths_via_relay_parent(&GENESIS_HASH).is_empty()); - assert_eq!( - view.paths_via_relay_parent(&CHAIN_A[0]), - vec![CHAIN_A.to_vec()] - ); } ); } @@ -1252,69 +1165,4 @@ mod tests { Some(hashes) if hashes == &[GENESIS_HASH] ); } - - #[test] - fn path_with_fork() { - let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); - - let mut view = View::default(); - - assert_eq!(view.collating_for, None); - - // Chain A - let prospective_response = vec![(PARA_A, 0)]; // was PARA_A_MIN_PARENT - let leaf = CHAIN_A.last().unwrap(); - let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); - let leaf_idx = blocks.len() - 1; - - let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - res.expect("`activate_leaf` timed out").unwrap(); - }); - let overseer_fut = async { - assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; - assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[..leaf_idx]).await; - }; - futures::executor::block_on(join(fut, overseer_fut)); - - // Chain B - let prospective_response = vec![(PARA_A, 1)]; - - let leaf = CHAIN_B.last().unwrap(); - let leaf_idx = CHAIN_B.len() - 1; - - let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - res.expect("`activate_leaf` timed out").unwrap(); - }); - let overseer_fut = async { - assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; - assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[0..leaf_idx]).await; - }; - futures::executor::block_on(join(fut, overseer_fut)); - - assert_eq!(view.leaves.len(), 2); - - let mut paths_to_genesis = view.paths_via_relay_parent(&GENESIS_HASH); - paths_to_genesis.sort(); - let mut expected_paths_to_genesis = vec![ - 
[GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>(), - [GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>(), - ]; - expected_paths_to_genesis.sort(); - assert_eq!(paths_to_genesis, expected_paths_to_genesis); - - let path_to_leaf_in_a = view.paths_via_relay_parent(&CHAIN_A[1]); - let expected_path_to_leaf_in_a = - vec![[GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>()]; - assert_eq!(path_to_leaf_in_a, expected_path_to_leaf_in_a); - - let path_to_leaf_in_b = view.paths_via_relay_parent(&CHAIN_B[4]); - let expected_path_to_leaf_in_b = - vec![[GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>()]; - assert_eq!(path_to_leaf_in_b, expected_path_to_leaf_in_b); - - assert_eq!(view.paths_via_relay_parent(&Hash::repeat_byte(0x0A)), Vec::>::new()); - } } diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index 8b4a26e33ee6..ce4bceec7336 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -5,12 +5,10 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 13b14c0b9231..587af659fbd2 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -13,32 +13,32 @@ workspace = true codec = { features = ["derive"], workspace = true } # Polkadot dependencies -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-test-runtime = { workspace = true } polkadot-test-service = { workspace 
= true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } # Substrate dependencies -frame-benchmarking = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +sc-service = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -substrate-test-client = { workspace = true } +sp-io = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [dev-dependencies] -futures = { workspace = true } sp-keyring = { workspace = true, default-features = true } +futures = { workspace = true } [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 54db2a0ac942..4ef9d88621fb 100644 --- 
a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -11,50 +11,50 @@ workspace = true [dependencies] futures = { workspace = true } -gum = { workspace = true, default-features = true } hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } tempfile = { workspace = true } tokio = { workspace = true, default-features = true } # Polkadot dependencies -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-rpc = { workspace = true, default-features = true } polkadot-runtime-common = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-service = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-test-runtime = { workspace = true } test-runtime-constants = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } # Substrate dependencies +sp-authority-discovery = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } frame-system = { workspace = 
true, default-features = true } -pallet-balances = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } -sc-authority-discovery = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } -sc-consensus-babe = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -sc-service = { workspace = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } +sc-service = { workspace = true } sp-arithmetic = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index 
f34bb62a7cf0..6e09bb9e4310 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -451,8 +451,8 @@ pub fn construct_extrinsic( /// Construct a transfer extrinsic. pub fn construct_transfer_extrinsic( client: &Client, - origin: sp_keyring::Sr25519Keyring, - dest: sp_keyring::Sr25519Keyring, + origin: sp_keyring::AccountKeyring, + dest: sp_keyring::AccountKeyring, value: Balance, ) -> UncheckedExtrinsic { let function = diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index 0fbf526ccb8b..d98377e53759 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,8 +5,6 @@ version = "2.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 0d04012e28a8..56c49a1ec305 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -12,13 +12,13 @@ license.workspace = true workspace = true [dependencies] -codec = { features = ["derive"], workspace = true, default-features = true } +tokio = { features = ["macros", "net", "rt-multi-thread", "sync"], workspace = true } +url = { workspace = true } +tokio-tungstenite = { workspace = true } futures-util = { workspace = true, default-features = true } -gum = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } reqwest = { features = ["rustls-tls"], workspace = true } +thiserror = { workspace = true } +gum = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { features = 
["macros", "net", "rt-multi-thread", "sync"], workspace = true } -tokio-tungstenite = { workspace = true } -url = { workspace = true } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 0dd103d58b25..9d0518fd46ad 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "6.0.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -15,14 +13,14 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -bounded-collections = { features = ["serde"], workspace = true } codec = { features = ["derive"], workspace = true } -derive_more = { workspace = true, default-features = true } -polkadot-core-primitives = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } -sp-core = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } sp-weights = { workspace = true } +polkadot-core-primitives = { workspace = true } +derive_more = { workspace = true, default-features = true } +bounded-collections = { features = ["serde"], workspace = true } # all optional crates. serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 1f2f9e2e9cdc..c5757928c3fc 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -57,8 +57,6 @@ impl HeadData { } } -impl codec::EncodeLike for alloc::vec::Vec {} - /// Parachain validation code. 
#[derive( PartialEq, @@ -156,9 +154,6 @@ pub struct BlockData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec #[cfg_attr(feature = "std", derive(derive_more::Display))] pub struct Id(u32); -impl codec::EncodeLike for Id {} -impl codec::EncodeLike for u32 {} - impl TypeId for Id { const TYPE_ID: [u8; 4] = *b"para"; } diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 2a1e1722bff9..9f35653f957f 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -11,8 +11,8 @@ publish = false workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } tiny-keccak = { features = ["keccak"], workspace = true } +codec = { features = ["derive"], workspace = true } test-parachain-adder = { workspace = true } test-parachain-halt = { workspace = true } diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index 945b0e156904..7a150b75d5cd 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -12,10 +12,10 @@ publish = false workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } -dlmalloc = { features = ["global"], workspace = true } polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +codec = { features = ["derive"], workspace = true } tiny-keccak = { features = ["keccak"], workspace = true } +dlmalloc = { features = ["global"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities sp-io = { features = ["disable_allocator"], workspace = true } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 20305dc07c3a..061378a76a82 100644 --- 
a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -clap = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } +test-parachain-adder = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } -test-parachain-adder = { workspace = true } sc-cli = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } +substrate-test-utils = { workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -substrate-test-utils = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git 
a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs index 5d728517c4bb..85abf8bf36b9 100644 --- a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs @@ -23,7 +23,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_adder_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::Sr25519Keyring::*; + use sp_keyring::AccountKeyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index ea8372ccd121..f8272f6ed196 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -14,8 +14,8 @@ workspace = true [dependencies] [build-dependencies] -rustversion = { workspace = true } substrate-wasm-builder = { workspace = true, default-features = true } +rustversion = { workspace = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 43b5a3352434..4b2e12ebf435 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -12,11 +12,11 @@ license.workspace = true workspace = true [dependencies] +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } codec = { features = ["derive"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } dlmalloc = { features = ["global"], workspace = true } log = { workspace = true } -polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } -tiny-keccak = { features = ["keccak"], workspace = true } # We need to make sure the global allocator is disabled until we have support of 
full substrate externalities sp-io = { features = ["disable_allocator"], workspace = true } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index b964b4dc49ce..5760258c70ea 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -clap = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } +test-parachain-undying = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } -test-parachain-undying = { workspace = true } sc-cli = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } +substrate-test-utils = { 
workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -substrate-test-utils = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs index b8e32b13bc9c..8be535b9bb4c 100644 --- a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs @@ -22,7 +22,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::Sr25519Keyring::*; + use sp_keyring::AccountKeyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index e693fe8c4a8c..dd269caa2d60 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -5,31 +5,29 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] bitvec = { features = ["alloc", "serde"], workspace = true } -codec = { features = ["bit-vec", "derive"], workspace = true } hex-literal = { workspace = true, default-features = true } -log = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } +log = { workspace = true } serde = { features = ["alloc", "derive"], workspace = true } thiserror = { workspace = true, optional = true } -sp-api = { workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } +sp-inherents = { workspace = true } +sp-core = { 
workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } sp-arithmetic = { features = ["serde"], workspace = true } sp-authority-discovery = { features = ["serde"], workspace = true } sp-consensus-slots = { features = ["serde"], workspace = true } -sp-core = { workspace = true } -sp-inherents = { workspace = true } sp-io = { workspace = true } sp-keystore = { optional = true, workspace = true } -sp-runtime = { workspace = true } sp-staking = { features = ["serde"], workspace = true } sp-std = { workspace = true, optional = true } diff --git a/polkadot/primitives/test-helpers/Cargo.toml b/polkadot/primitives/test-helpers/Cargo.toml index 962b210848c8..27de3c4b9c56 100644 --- a/polkadot/primitives/test-helpers/Cargo.toml +++ b/polkadot/primitives/test-helpers/Cargo.toml @@ -10,9 +10,9 @@ license.workspace = true workspace = true [dependencies] -polkadot-primitives = { features = ["test"], workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true } -sp-core = { features = ["std"], workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true } sp-runtime = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { features = ["test"], workspace = true, default-features = true } +rand = { workspace = true, default-features = true } diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 586a4169b5bc..432d9ab69bab 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -151,6 +151,12 @@ time per relay parent. 
This reduces the bandwidth requirements and as we can sec the others are probably not required anyway. If the request times out, we need to note the collator as being unreliable and reduce its priority relative to other collators. +As a validator, once the collation has been fetched some other subsystem will inspect and do deeper validation of the +collation. The subsystem will report to this subsystem with a [`CollatorProtocolMessage`][CPM]`::ReportCollator`. In +that case, if we are connected directly to the collator, we apply a cost to the `PeerId` associated with the collator +and potentially disconnect or blacklist it. If the collation is seconded, we notify the collator and apply a benefit to +the `PeerId` associated with the collator. + ### Interaction with [Candidate Backing][CB] As collators advertise the availability, a validator will simply second the first valid parablock candidate per relay diff --git a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md index a96f3fa3d4a0..a3ca7347eb63 100644 --- a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md +++ b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md @@ -129,6 +129,7 @@ digraph { cand_sel -> coll_prot [arrowhead = "diamond", label = "FetchCollation"] cand_sel -> cand_back [arrowhead = "onormal", label = "Second"] + cand_sel -> coll_prot [arrowhead = "onormal", label = "ReportCollator"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::PersistedValidationData"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::ValidationCode"] @@ -230,7 +231,7 @@ sequenceDiagram VS ->> CandidateSelection: Collation - Note over CandidateSelection: Lots of other machinery in play here,
but there are only two outcomes from the
perspective of the `CollatorProtocol`: + Note over CandidateSelection: Lots of other machinery in play here,
but there are only three outcomes from the
perspective of the `CollatorProtocol`: alt happy path CandidateSelection -->> VS: FetchCollation @@ -241,6 +242,10 @@ sequenceDiagram NB ->> VS: Collation Deactivate VS + else collation invalid or unexpected + CandidateSelection ->> VS: ReportCollator + VS ->> NB: ReportPeer + else CandidateSelection already selected a different candidate Note over CandidateSelection: silently drop end diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md index 0fe7fdd13653..64727d39fabe 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -74,8 +74,9 @@ Subsystem](../disputes/dispute-coordinator.md). Misbehavior reports are currentl subsystem](../backing/candidate-backing.md) and contain the following misbehaviors: 1. `Misbehavior::ValidityDoubleVote` -2. `Misbehavior::UnauthorizedStatement` -3. `Misbehavior::DoubleSign` +2. `Misbehavior::MultipleCandidates` +3. `Misbehavior::UnauthorizedStatement` +4. `Misbehavior::DoubleSign` But we choose not to punish these forms of misbehavior for the time being. Risks from misbehavior are sufficiently mitigated at the protocol level via reputation changes. Punitive actions here may become desirable enough to dedicate diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index cb862440727b..6e24d969dde4 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -436,6 +436,9 @@ enum CollatorProtocolMessage { DistributeCollation(CandidateReceipt, PoV, Option>), /// Fetch a collation under the given relay-parent for the given ParaId. 
FetchCollation(Hash, ParaId, ResponseChannel<(CandidateReceipt, PoV)>), + /// Report a collator as having provided an invalid collation. This should lead to disconnect + /// and blacklist of the collator. + ReportCollator(CollatorId), /// Note a collator as having provided a good collation. NoteGoodCollation(CollatorId, SignedFullStatement), /// Notify a collator that its collation was seconded. @@ -694,6 +697,14 @@ mod generic { Invalidity(Digest, Signature, Signature), } + /// Misbehavior: declaring multiple candidates. + pub struct MultipleCandidates { + /// The first candidate seen. + pub first: (Candidate, Signature), + /// The second candidate seen. + pub second: (Candidate, Signature), + } + /// Misbehavior: submitted statement for wrong group. pub struct UnauthorizedStatement { /// A signed statement which was submitted without proper authority. @@ -703,6 +714,8 @@ mod generic { pub enum Misbehavior { /// Voted invalid and valid on validity. ValidityDoubleVote(ValidityDoubleVote), + /// Submitted multiple candidates. + MultipleCandidates(MultipleCandidates), /// Submitted a message that was unauthorized. UnauthorizedStatement(UnauthorizedStatement), /// Submitted two valid signatures for the same message. diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index 33ce3ff4acc6..d01528d4dee0 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -5,19 +5,25 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] jsonrpsee = { features = ["server"], workspace = true } -mmr-rpc = { workspace = true, default-features = true } -pallet-transaction-payment-rpc = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-babe-rpc = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } @@ -25,18 +31,10 @@ sc-consensus-beefy-rpc = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-grandpa-rpc = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-rpc-spec-v2 = { workspace = true, default-features = true } sc-sync-state-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } 
-sp-application-crypto = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 4ffa5c475ed7..01b56b31cf20 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -5,16 +5,14 @@ description = "Pallets and constants used in Relay Chain networks." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] +impl-trait-for-tuples = { workspace = true } bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive"], workspace = true } -impl-trait-for-tuples = { workspace = true } log = { workspace = true } rustc-hex = { workspace = true } scale-info = { features = ["derive"], workspace = true } @@ -23,55 +21,55 @@ serde_derive = { workspace = true } static_assertions = { workspace = true, default-features = true } sp-api = { workspace = true } -sp-core = { features = ["serde"], workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } -sp-npos-elections = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-keyring = { workspace = true } +sp-npos-elections = { features = ["serde"], workspace = true } -frame-election-provider-support = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -pallet-asset-rate = { optional = true, workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true } -pallet-election-provider-multi-phase = { workspace = true } pallet-fast-unstake = { workspace = true } pallet-identity = { workspace = true } pallet-session = { workspace = true } +frame-support = { workspace = true } pallet-staking = { workspace = true } pallet-staking-reward-fn = { workspace = true } +frame-system = { workspace = true } pallet-timestamp = { workspace = true } +pallet-vesting = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-treasury = { workspace = true } -pallet-vesting = { workspace 
= true } +pallet-asset-rate = { optional = true, workspace = true } +pallet-election-provider-multi-phase = { workspace = true } +frame-election-provider-support = { workspace = true } frame-benchmarking = { optional = true, workspace = true } pallet-babe = { optional = true, workspace = true } -libsecp256k1 = { workspace = true } polkadot-primitives = { workspace = true } +libsecp256k1 = { workspace = true } polkadot-runtime-parachains = { workspace = true } slot-range-helper = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { optional = true, workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -frame-support-test = { workspace = true } hex-literal = { workspace = true, default-features = true } -libsecp256k1 = { workspace = true, default-features = true } +frame-support-test = { workspace = true } pallet-babe = { workspace = true, default-features = true } pallet-treasury = { workspace = true, default-features = true } -polkadot-primitives-test-helpers = { workspace = true } -serde_json = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +libsecp256k1 = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } [features] default = ["std"] @@ -142,7 +140,6 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 684cdcd01e14..02810b75283f 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ 
b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -5,16 +5,14 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } -enumn = { workspace = true } paste = { workspace = true, default-features = true } +enumn = { workspace = true } +codec = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [features] diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs new file mode 100644 index 000000000000..78f20d918bab --- /dev/null +++ b/polkadot/runtime/common/src/auctions.rs @@ -0,0 +1,1934 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Auctioning system to determine the set of Parachains in operation. This includes logic for the +//! auctioning mechanism and for reserving balance as part of the "payment". Unreserving the balance +//! happens elsewhere. 
+ +use crate::{ + slot_range::SlotRange, + traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, +}; +use alloc::{vec, vec::Vec}; +use codec::Decode; +use core::mem::swap; +use frame_support::{ + dispatch::DispatchResult, + ensure, + traits::{Currency, Get, Randomness, ReservableCurrency}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +pub use pallet::*; +use polkadot_primitives::Id as ParaId; +use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; + +type CurrencyOf = <::Leaser as Leaser>>::Currency; +type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< + ::AccountId, +>>::Balance; + +pub trait WeightInfo { + fn new_auction() -> Weight; + fn bid() -> Weight; + fn cancel_auction() -> Weight; + fn on_initialize() -> Weight; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn new_auction() -> Weight { + Weight::zero() + } + fn bid() -> Weight { + Weight::zero() + } + fn cancel_auction() -> Weight { + Weight::zero() + } + fn on_initialize() -> Weight { + Weight::zero() + } +} + +/// An auction index. We count auctions in this type. +pub type AuctionIndex = u32; + +type LeasePeriodOf = <::Leaser as Leaser>>::LeasePeriod; + +// Winning data type. This encodes the top bidders of each range together with their bid. +type WinningData = [Option<(::AccountId, ParaId, BalanceOf)>; + SlotRange::SLOT_RANGE_COUNT]; +// Winners data type. This encodes each of the final winners of a parachain auction, the parachain +// index assigned to them, their winning bid and the range that they won. +type WinnersData = + Vec<(::AccountId, ParaId, BalanceOf, SlotRange)>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{dispatch::DispatchClass, pallet_prelude::*, traits::EnsureOrigin}; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + + #[pallet::pallet] + pub struct Pallet(_); + + /// The module's configuration trait. 
+ #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The type representing the leasing system. + type Leaser: Leaser< + BlockNumberFor, + AccountId = Self::AccountId, + LeasePeriod = BlockNumberFor, + >; + + /// The parachain registrar type. + type Registrar: Registrar; + + /// The number of blocks over which an auction may be retroactively ended. + #[pallet::constant] + type EndingPeriod: Get>; + + /// The length of each sample to take during the ending period. + /// + /// `EndingPeriod` / `SampleLength` = Total # of Samples + #[pallet::constant] + type SampleLength: Get>; + + /// Something that provides randomness in the runtime. + type Randomness: Randomness>; + + /// The origin which may initiate auctions. + type InitiateOrigin: EnsureOrigin; + + /// Weight Information for the Extrinsics in the Pallet + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An auction started. Provides its index and the block number where it will begin to + /// close and the first lease period of the quadruplet that is auctioned. + AuctionStarted { + auction_index: AuctionIndex, + lease_period: LeasePeriodOf, + ending: BlockNumberFor, + }, + /// An auction ended. All funds become unreserved. + AuctionClosed { auction_index: AuctionIndex }, + /// Funds were reserved for a winning bid. First balance is the extra amount reserved. + /// Second is the total. + Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, + /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` + Unreserved { bidder: T::AccountId, amount: BalanceOf }, + /// Someone attempted to lease the same slot twice for a parachain. The amount is held in + /// reserve but no parachain slot has been leased. 
+ ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, + /// A new bid has been accepted as the current winner. + BidAccepted { + bidder: T::AccountId, + para_id: ParaId, + amount: BalanceOf, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + }, + /// The winning offset was chosen for an auction. This will map into the `Winning` storage + /// map. + WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, + } + + #[pallet::error] + pub enum Error { + /// This auction is already in progress. + AuctionInProgress, + /// The lease period is in the past. + LeasePeriodInPast, + /// Para is not registered + ParaNotRegistered, + /// Not a current auction. + NotCurrentAuction, + /// Not an auction. + NotAuction, + /// Auction has already ended. + AuctionEnded, + /// The para is already leased out for part of this range. + AlreadyLeasedOut, + } + + /// Number of auctions started so far. + #[pallet::storage] + pub type AuctionCounter = StorageValue<_, AuctionIndex, ValueQuery>; + + /// Information relating to the current auction, if there is one. + /// + /// The first item in the tuple is the lease period index that the first of the four + /// contiguous lease periods on auction is for. The second is the block number when the + /// auction will "begin to end", i.e. the first block of the Ending Period of the auction. + #[pallet::storage] + pub type AuctionInfo = StorageValue<_, (LeasePeriodOf, BlockNumberFor)>; + + /// Amounts currently reserved in the accounts of the bidders currently winning + /// (sub-)ranges. + #[pallet::storage] + pub type ReservedAmounts = + StorageMap<_, Twox64Concat, (T::AccountId, ParaId), BalanceOf>; + + /// The winning bids for each of the 10 ranges at each sample in the final Ending Period of + /// the current auction. The map's key is the 0-based index into the Sample Size. The + /// first sample of the ending period is 0; the last is `Sample Size - 1`. 
+ #[pallet::storage] + pub type Winning = StorageMap<_, Twox64Concat, BlockNumberFor, WinningData>; + + #[pallet::extra_constants] + impl Pallet { + #[pallet::constant_name(SlotRangeCount)] + fn slot_range_count() -> u32 { + SlotRange::SLOT_RANGE_COUNT as u32 + } + + #[pallet::constant_name(LeasePeriodsPerSlot)] + fn lease_periods_per_slot() -> u32 { + SlotRange::LEASE_PERIODS_PER_SLOT as u32 + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + // If the current auction was in its ending period last block, then ensure that the + // (sub-)range winner information is duplicated from the previous block in case no bids + // happened in the last block. + if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + if !Winning::::contains_key(&offset) { + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + let winning_data = offset + .checked_sub(&One::one()) + .and_then(Winning::::get) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + Winning::::insert(offset, winning_data); + } + } + + // Check to see if an auction just ended. + if let Some((winning_ranges, auction_lease_period_index)) = Self::check_auction_end(n) { + // Auction is ended now. We have the winning ranges and the lease period index which + // acts as the offset. Handle it. + Self::manage_auction_end(auction_lease_period_index, winning_ranges); + weight = weight.saturating_add(T::WeightInfo::on_initialize()); + } + + weight + } + } + + #[pallet::call] + impl Pallet { + /// Create a new auction. + /// + /// This can only happen when there isn't already an auction in progress and may only be + /// called by the root origin. Accepts the `duration` of this auction and the + /// `lease_period_index` of the initial lease period of the four that are to be auctioned. 
+ #[pallet::call_index(0)] + #[pallet::weight((T::WeightInfo::new_auction(), DispatchClass::Operational))] + pub fn new_auction( + origin: OriginFor, + #[pallet::compact] duration: BlockNumberFor, + #[pallet::compact] lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + T::InitiateOrigin::ensure_origin(origin)?; + Self::do_new_auction(duration, lease_period_index) + } + + /// Make a new bid from an account (including a parachain account) for deploying a new + /// parachain. + /// + /// Multiple simultaneous bids from the same bidder are allowed only as long as all active + /// bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted. + /// + /// - `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and + /// funded by) the same account. + /// - `auction_index` is the index of the auction to bid on. Should just be the present + /// value of `AuctionCounter`. + /// - `first_slot` is the first lease period index of the range to bid on. This is the + /// absolute lease period index value, not an auction-specific offset. + /// - `last_slot` is the last lease period index of the range to bid on. This is the + /// absolute lease period index value, not an auction-specific offset. + /// - `amount` is the amount to bid to be held as deposit for the parachain should the + /// bid win. This amount is held throughout the range. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::bid())] + pub fn bid( + origin: OriginFor, + #[pallet::compact] para: ParaId, + #[pallet::compact] auction_index: AuctionIndex, + #[pallet::compact] first_slot: LeasePeriodOf, + #[pallet::compact] last_slot: LeasePeriodOf, + #[pallet::compact] amount: BalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::handle_bid(who, para, auction_index, first_slot, last_slot, amount)?; + Ok(()) + } + + /// Cancel an in-progress auction. + /// + /// Can only be called by Root origin. 
+ #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::cancel_auction())] + pub fn cancel_auction(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + // Unreserve all bids. + for ((bidder, _), amount) in ReservedAmounts::::drain() { + CurrencyOf::::unreserve(&bidder, amount); + } + #[allow(deprecated)] + Winning::::remove_all(None); + AuctionInfo::::kill(); + Ok(()) + } + } +} + +impl Auctioneer> for Pallet { + type AccountId = T::AccountId; + type LeasePeriod = BlockNumberFor; + type Currency = CurrencyOf; + + fn new_auction( + duration: BlockNumberFor, + lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + Self::do_new_auction(duration, lease_period_index) + } + + // Returns the status of the auction given the current block number. + fn auction_status(now: BlockNumberFor) -> AuctionStatus> { + let early_end = match AuctionInfo::::get() { + Some((_, early_end)) => early_end, + None => return AuctionStatus::NotStarted, + }; + + let after_early_end = match now.checked_sub(&early_end) { + Some(after_early_end) => after_early_end, + None => return AuctionStatus::StartingPeriod, + }; + + let ending_period = T::EndingPeriod::get(); + if after_early_end < ending_period { + let sample_length = T::SampleLength::get().max(One::one()); + let sample = after_early_end / sample_length; + let sub_sample = after_early_end % sample_length; + return AuctionStatus::EndingPeriod(sample, sub_sample) + } else { + // This is safe because of the comparison operator above + return AuctionStatus::VrfDelay(after_early_end - ending_period) + } + } + + fn place_bid( + bidder: T::AccountId, + para: ParaId, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + amount: BalanceOf, + ) -> DispatchResult { + Self::handle_bid(bidder, para, AuctionCounter::::get(), first_slot, last_slot, amount) + } + + fn lease_period_index(b: BlockNumberFor) -> Option<(Self::LeasePeriod, bool)> { + T::Leaser::lease_period_index(b) + } + + #[cfg(any(feature = "runtime-benchmarks", 
test))] + fn lease_period_length() -> (BlockNumberFor, BlockNumberFor) { + T::Leaser::lease_period_length() + } + + fn has_won_an_auction(para: ParaId, bidder: &T::AccountId) -> bool { + !T::Leaser::deposit_held(para, bidder).is_zero() + } +} + +impl Pallet { + // A trick to allow me to initialize large arrays with nothing in them. + const EMPTY: Option<(::AccountId, ParaId, BalanceOf)> = None; + + /// Create a new auction. + /// + /// This can only happen when there isn't already an auction in progress. Accepts the `duration` + /// of this auction and the `lease_period_index` of the initial lease period of the four that + /// are to be auctioned. + fn do_new_auction( + duration: BlockNumberFor, + lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + let maybe_auction = AuctionInfo::::get(); + ensure!(maybe_auction.is_none(), Error::::AuctionInProgress); + let now = frame_system::Pallet::::block_number(); + if let Some((current_lease_period, _)) = T::Leaser::lease_period_index(now) { + // If there is no active lease period, then we don't need to make this check. + ensure!(lease_period_index >= current_lease_period, Error::::LeasePeriodInPast); + } + + // Bump the counter. + let n = AuctionCounter::::mutate(|n| { + *n += 1; + *n + }); + + // Set the information. + let ending = frame_system::Pallet::::block_number().saturating_add(duration); + AuctionInfo::::put((lease_period_index, ending)); + + Self::deposit_event(Event::::AuctionStarted { + auction_index: n, + lease_period: lease_period_index, + ending, + }); + Ok(()) + } + + /// Actually place a bid in the current auction. + /// + /// - `bidder`: The account that will be funding this bid. + /// - `auction_index`: The auction index of the bid. For this to succeed, must equal + /// the current value of `AuctionCounter`. + /// - `first_slot`: The first lease period index of the range to be bid on. + /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). 
+ /// - `amount`: The total amount to be the bid for deposit over the range. + pub fn handle_bid( + bidder: T::AccountId, + para: ParaId, + auction_index: u32, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + amount: BalanceOf, + ) -> DispatchResult { + // Ensure para is registered before placing a bid on it. + ensure!(T::Registrar::is_registered(para), Error::::ParaNotRegistered); + // Bidding on latest auction. + ensure!(auction_index == AuctionCounter::::get(), Error::::NotCurrentAuction); + // Assume it's actually an auction (this should never fail because of above). + let (first_lease_period, _) = AuctionInfo::::get().ok_or(Error::::NotAuction)?; + + // Get the auction status and the current sample block. For the starting period, the sample + // block is zero. + let auction_status = Self::auction_status(frame_system::Pallet::::block_number()); + // The offset into the ending samples of the auction. + let offset = match auction_status { + AuctionStatus::NotStarted => return Err(Error::::AuctionEnded.into()), + AuctionStatus::StartingPeriod => Zero::zero(), + AuctionStatus::EndingPeriod(o, _) => o, + AuctionStatus::VrfDelay(_) => return Err(Error::::AuctionEnded.into()), + }; + + // We also make sure that the bid is not for any existing leases the para already has. + ensure!( + !T::Leaser::already_leased(para, first_slot, last_slot), + Error::::AlreadyLeasedOut + ); + + // Our range. + let range = SlotRange::new_bounded(first_lease_period, first_slot, last_slot)?; + // Range as an array index. + let range_index = range as u8 as usize; + + // The current winning ranges. + let mut current_winning = Winning::::get(offset) + .or_else(|| offset.checked_sub(&One::one()).and_then(Winning::::get)) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + + // If this bid beat the previous winner of our range. 
+ if current_winning[range_index].as_ref().map_or(true, |last| amount > last.2) { + // Ok; we are the new winner of this range - reserve the additional amount and record. + + // Get the amount already held on deposit if this is a renewal bid (i.e. there's + // an existing lease on the same para by the same leaser). + let existing_lease_deposit = T::Leaser::deposit_held(para, &bidder); + let reserve_required = amount.saturating_sub(existing_lease_deposit); + + // Get the amount already reserved in any prior and still active bids by us. + let bidder_para = (bidder.clone(), para); + let already_reserved = ReservedAmounts::::get(&bidder_para).unwrap_or_default(); + + // If these don't already cover the bid... + if let Some(additional) = reserve_required.checked_sub(&already_reserved) { + // ...then reserve some more funds from their account, failing if there's not + // enough funds. + CurrencyOf::::reserve(&bidder, additional)?; + // ...and record the amount reserved. + ReservedAmounts::::insert(&bidder_para, reserve_required); + + Self::deposit_event(Event::::Reserved { + bidder: bidder.clone(), + extra_reserved: additional, + total_amount: reserve_required, + }); + } + + // Return any funds reserved for the previous winner if we are not in the ending period + // and they no longer have any active bids. + let mut outgoing_winner = Some((bidder.clone(), para, amount)); + swap(&mut current_winning[range_index], &mut outgoing_winner); + if let Some((who, para, _amount)) = outgoing_winner { + if auction_status.is_starting() && + current_winning + .iter() + .filter_map(Option::as_ref) + .all(|&(ref other, other_para, _)| other != &who || other_para != para) + { + // Previous bidder is no longer winning any ranges: unreserve their funds. + if let Some(amount) = ReservedAmounts::::take(&(who.clone(), para)) { + // It really should be reserved; there's not much we can do here on fail. 
+ let err_amt = CurrencyOf::::unreserve(&who, amount); + debug_assert!(err_amt.is_zero()); + Self::deposit_event(Event::::Unreserved { bidder: who, amount }); + } + } + } + + // Update the range winner. + Winning::::insert(offset, ¤t_winning); + Self::deposit_event(Event::::BidAccepted { + bidder, + para_id: para, + amount, + first_slot, + last_slot, + }); + } + Ok(()) + } + + /// Some when the auction's end is known (with the end block number). None if it is unknown. + /// If `Some` then the block number must be at most the previous block and at least the + /// previous block minus `T::EndingPeriod::get()`. + /// + /// This mutates the state, cleaning up `AuctionInfo` and `Winning` in the case of an auction + /// ending. An immediately subsequent call with the same argument will always return `None`. + fn check_auction_end(now: BlockNumberFor) -> Option<(WinningData, LeasePeriodOf)> { + if let Some((lease_period_index, early_end)) = AuctionInfo::::get() { + let ending_period = T::EndingPeriod::get(); + let late_end = early_end.saturating_add(ending_period); + let is_ended = now >= late_end; + if is_ended { + // auction definitely ended. + // check to see if we can determine the actual ending point. + let (raw_offset, known_since) = T::Randomness::random(&b"para_auction"[..]); + + if late_end <= known_since { + // Our random seed was known only after the auction ended. Good to use. 
+ let raw_offset_block_number = >::decode( + &mut raw_offset.as_ref(), + ) + .expect("secure hashes should always be bigger than the block number; qed"); + let offset = (raw_offset_block_number % ending_period) / + T::SampleLength::get().max(One::one()); + + let auction_counter = AuctionCounter::::get(); + Self::deposit_event(Event::::WinningOffset { + auction_index: auction_counter, + block_number: offset, + }); + let res = Winning::::get(offset) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + // This `remove_all` statement should remove at most `EndingPeriod` / + // `SampleLength` items, which should be bounded and sensibly configured in the + // runtime. + #[allow(deprecated)] + Winning::::remove_all(None); + AuctionInfo::::kill(); + return Some((res, lease_period_index)) + } + } + } + None + } + + /// Auction just ended. We have the current lease period, the auction's lease period (which + /// is guaranteed to be at least the current period) and the bidders that were winning each + /// range at the time of the auction's close. + fn manage_auction_end( + auction_lease_period_index: LeasePeriodOf, + winning_ranges: WinningData, + ) { + // First, unreserve all amounts that were reserved for the bids. We will later re-reserve + // the amounts from the bidders that ended up being assigned the slot so there's no need to + // special-case them here. + for ((bidder, _), amount) in ReservedAmounts::::drain() { + CurrencyOf::::unreserve(&bidder, amount); + } + + // Next, calculate the winning combination of slots and thus the final winners of the + // auction. + let winners = Self::calculate_winners(winning_ranges); + + // Go through those winners and re-reserve their bid, updating our table of deposits + // accordingly. 
+ for (leaser, para, amount, range) in winners.into_iter() { + let begin_offset = LeasePeriodOf::::from(range.as_pair().0 as u32); + let period_begin = auction_lease_period_index + begin_offset; + let period_count = LeasePeriodOf::::from(range.len() as u32); + + match T::Leaser::lease_out(para, &leaser, amount, period_begin, period_count) { + Err(LeaseError::ReserveFailed) | + Err(LeaseError::AlreadyEnded) | + Err(LeaseError::NoLeasePeriod) => { + // Should never happen since we just unreserved this amount (and our offset is + // from the present period). But if it does, there's not much we can do. + }, + Err(LeaseError::AlreadyLeased) => { + // The leaser attempted to get a second lease on the same para ID, possibly + // griefing us. Let's keep the amount reserved and let governance sort it out. + if CurrencyOf::::reserve(&leaser, amount).is_ok() { + Self::deposit_event(Event::::ReserveConfiscated { + para_id: para, + leaser, + amount, + }); + } + }, + Ok(()) => {}, // Nothing to report. + } + } + + Self::deposit_event(Event::::AuctionClosed { + auction_index: AuctionCounter::::get(), + }); + } + + /// Calculate the final winners from the winning slots. 
+ /// + /// This is a simple dynamic programming algorithm designed by Al, the original code is at: + /// `https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py` + fn calculate_winners(mut winning: WinningData) -> WinnersData { + let winning_ranges = { + let mut best_winners_ending_at: [(Vec, BalanceOf); + SlotRange::LEASE_PERIODS_PER_SLOT] = Default::default(); + let best_bid = |range: SlotRange| { + winning[range as u8 as usize] + .as_ref() + .map(|(_, _, amount)| *amount * (range.len() as u32).into()) + }; + for i in 0..SlotRange::LEASE_PERIODS_PER_SLOT { + let r = SlotRange::new_bounded(0, 0, i as u32).expect("`i < LPPS`; qed"); + if let Some(bid) = best_bid(r) { + best_winners_ending_at[i] = (vec![r], bid); + } + for j in 0..i { + let r = SlotRange::new_bounded(0, j as u32 + 1, i as u32) + .expect("`i < LPPS`; `j < i`; `j + 1 < LPPS`; qed"); + if let Some(mut bid) = best_bid(r) { + bid += best_winners_ending_at[j].1; + if bid > best_winners_ending_at[i].1 { + let mut new_winners = best_winners_ending_at[j].0.clone(); + new_winners.push(r); + best_winners_ending_at[i] = (new_winners, bid); + } + } else { + if best_winners_ending_at[j].1 > best_winners_ending_at[i].1 { + best_winners_ending_at[i] = best_winners_ending_at[j].clone(); + } + } + } + } + best_winners_ending_at[SlotRange::LEASE_PERIODS_PER_SLOT - 1].0.clone() + }; + + winning_ranges + .into_iter() + .filter_map(|range| { + winning[range as u8 as usize] + .take() + .map(|(bidder, para, amount)| (bidder, para, amount, range)) + }) + .collect::>() + } +} + +/// tests for this module +#[cfg(test)] +mod tests { + use super::*; + use crate::{auctions, mock::TestRegistrar}; + use frame_support::{ + assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, + parameter_types, + traits::{EitherOfDiverse, OnFinalize, OnInitialize}, + }; + use frame_system::{EnsureRoot, EnsureSignedBy}; + use pallet_balances; + use polkadot_primitives::{BlockNumber, Id as ParaId}; + use 
polkadot_primitives_test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; + use sp_core::H256; + use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, + DispatchError::BadOrigin, + }; + use std::{cell::RefCell, collections::BTreeMap}; + + type Block = frame_system::mocking::MockBlockU32; + + frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Auctions: auctions, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Test { + type AccountStore = System; + } + + #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] + pub struct LeaseData { + leaser: u64, + amount: u64, + } + + thread_local! 
{ + pub static LEASES: + RefCell> = RefCell::new(BTreeMap::new()); + } + + fn leases() -> Vec<((ParaId, BlockNumber), LeaseData)> { + LEASES.with(|p| (&*p.borrow()).clone().into_iter().collect::>()) + } + + pub struct TestLeaser; + impl Leaser for TestLeaser { + type AccountId = u64; + type LeasePeriod = BlockNumber; + type Currency = Balances; + + fn lease_out( + para: ParaId, + leaser: &Self::AccountId, + amount: >::Balance, + period_begin: Self::LeasePeriod, + period_count: Self::LeasePeriod, + ) -> Result<(), LeaseError> { + LEASES.with(|l| { + let mut leases = l.borrow_mut(); + let now = System::block_number(); + let (current_lease_period, _) = + Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; + if period_begin < current_lease_period { + return Err(LeaseError::AlreadyEnded) + } + for period in period_begin..(period_begin + period_count) { + if leases.contains_key(&(para, period)) { + return Err(LeaseError::AlreadyLeased) + } + leases.insert((para, period), LeaseData { leaser: *leaser, amount }); + } + Ok(()) + }) + } + + fn deposit_held( + para: ParaId, + leaser: &Self::AccountId, + ) -> >::Balance { + leases() + .iter() + .filter_map(|((id, _period), data)| { + if id == ¶ && &data.leaser == leaser { + Some(data.amount) + } else { + None + } + }) + .max() + .unwrap_or_default() + } + + fn lease_period_length() -> (BlockNumber, BlockNumber) { + (10, 0) + } + + fn lease_period_index(b: BlockNumber) -> Option<(Self::LeasePeriod, bool)> { + let (lease_period_length, offset) = Self::lease_period_length(); + let b = b.checked_sub(offset)?; + + let lease_period = b / lease_period_length; + let first_block = (b % lease_period_length).is_zero(); + + Some((lease_period, first_block)) + } + + fn already_leased( + para_id: ParaId, + first_period: Self::LeasePeriod, + last_period: Self::LeasePeriod, + ) -> bool { + leases().into_iter().any(|((para, period), _data)| { + para == para_id && first_period <= period && period <= last_period + }) + } + } + + 
ord_parameter_types! { + pub const Six: u64 = 6; + } + + type RootOrSix = EitherOfDiverse, EnsureSignedBy>; + + thread_local! { + pub static LAST_RANDOM: RefCell> = RefCell::new(None); + } + fn set_last_random(output: H256, known_since: u32) { + LAST_RANDOM.with(|p| *p.borrow_mut() = Some((output, known_since))) + } + pub struct TestPastRandomness; + impl Randomness for TestPastRandomness { + fn random(_subject: &[u8]) -> (H256, u32) { + LAST_RANDOM.with(|p| { + if let Some((output, known_since)) = &*p.borrow() { + (*output, *known_since) + } else { + (H256::zero(), frame_system::Pallet::::block_number()) + } + }) + } + } + + parameter_types! { + pub static EndingPeriod: BlockNumber = 3; + pub static SampleLength: BlockNumber = 1; + } + + impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Leaser = TestLeaser; + type Registrar = TestRegistrar; + type EndingPeriod = EndingPeriod; + type SampleLength = SampleLength; + type Randomness = TestPastRandomness; + type InitiateOrigin = RootOrSix; + type WeightInfo = crate::auctions::TestWeightInfo; + } + + // This function basically just builds a genesis storage key/value store according to + // our desired mock up. 
+ pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + // Register para 0, 1, 2, and 3 for tests + assert_ok!(TestRegistrar::::register( + 1, + 0.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 1.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 2.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 3.into(), + dummy_head_data(), + dummy_validation_code() + )); + }); + ext + } + + fn run_to_block(n: BlockNumber) { + while System::block_number() < n { + Auctions::on_finalize(System::block_number()); + Balances::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Balances::on_initialize(System::block_number()); + Auctions::on_initialize(System::block_number()); + } + } + + #[test] + fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(AuctionCounter::::get(), 0); + assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + + run_to_block(10); + + assert_eq!(AuctionCounter::::get(), 0); + assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); + } + + #[test] + fn can_start_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin); + 
assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_eq!(AuctionCounter::::get(), 1); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + }); + } + + #[test] + fn bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + + assert_eq!(Balances::reserved_balance(1), 5); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!( + Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], + Some((1, 0.into(), 5)) + ); + }); + } + + #[test] + fn under_bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + + assert_storage_noop!({ + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 1)); + }); + }); + } + + #[test] + fn over_bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6)); + + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::reserved_balance(2), 6); + assert_eq!(Balances::free_balance(2), 14); + assert_eq!( + Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], + Some((2, 0.into(), 6)) + ); + }); + } + + #[test] + fn auction_proceeds_correctly() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_eq!(AuctionCounter::::get(), 1); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + 
run_to_block(2); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(3); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(4); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(5); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(6); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + + run_to_block(7); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + + run_to_block(8); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); + } + + #[test] + fn can_win_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + }); + } + + #[test] + fn can_win_auction_with_late_randomness() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + 
assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + run_to_block(8); + // Auction has not yet ended. + assert_eq!(leases(), vec![]); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + // This will prevent the auction's winner from being decided in the next block, since + // the random seed was known before the final bids were made. + set_last_random(H256::zero(), 8); + // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet + // since no randomness available yet. + run_to_block(9); + // Auction has now ended... But auction winner still not yet decided, so no leases yet. + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(0) + ); + assert_eq!(leases(), vec![]); + + // Random seed now updated to a value known at block 9, when the auction ended. This + // means that the winner can now be chosen. 
+ set_last_random(H256::zero(), 9); + run_to_block(10); + // Auction ended and winner selected + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + }); + } + + #[test] + fn can_win_incomplete_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5)); + run_to_block(9); + + assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + }); + } + + #[test] + fn should_choose_best_combination() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2)); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 2, amount: 4 }), + ((0.into(), 3), LeaseData { leaser: 2, amount: 4 }), + ((0.into(), 4), LeaseData { leaser: 3, amount: 2 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + assert_eq!(TestLeaser::deposit_held(1.into(), &1), 0); + assert_eq!(TestLeaser::deposit_held(0.into(), &2), 4); + assert_eq!(TestLeaser::deposit_held(0.into(), &3), 2); + }); + } + + #[test] + fn gap_bid_works() { + 
new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + // User 1 will make a bid for period 1 and 4 for the same Para 0 + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 4)); + + // User 2 and 3 will make a bid for para 1 on period 2 and 3 respectively + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 1.into(), 1, 2, 2, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 1.into(), 1, 3, 3, 3)); + + // Total reserved should be the max of the two + assert_eq!(Balances::reserved_balance(1), 4); + + // Other people are reserved correctly too + assert_eq!(Balances::reserved_balance(2), 2); + assert_eq!(Balances::reserved_balance(3), 3); + + // End the auction. + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 4 }), + ((1.into(), 2), LeaseData { leaser: 2, amount: 2 }), + ((1.into(), 3), LeaseData { leaser: 3, amount: 3 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 4); + assert_eq!(TestLeaser::deposit_held(1.into(), &2), 2); + assert_eq!(TestLeaser::deposit_held(1.into(), &3), 3); + }); + } + + #[test] + fn deposit_credit_should_work() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); + assert_eq!(Balances::reserved_balance(1), 5); + run_to_block(10); + + assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6)); + // Only 1 reserved since we have a deposit credit of 5. 
+ assert_eq!(Balances::reserved_balance(1), 1); + run_to_block(20); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 6 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 6); + }); + } + + #[test] + fn deposit_credit_on_alt_para_should_not_count() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); + assert_eq!(Balances::reserved_balance(1), 5); + run_to_block(10); + + assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6)); + // 6 reserved since we are bidding on a new para; only works because we don't + assert_eq!(Balances::reserved_balance(1), 6); + run_to_block(20); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), + ((1.into(), 2), LeaseData { leaser: 1, amount: 6 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + assert_eq!(TestLeaser::deposit_held(1.into(), &1), 6); + }); + } + + #[test] + fn multiple_bids_work_pre_ending() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + for i in 1..6u64 { + run_to_block(i as _); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); + for j in 1..6 { + assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 }); + assert_eq!(Balances::free_balance(j), if j == i { j * 9 } else { j * 10 }); + } + } + + run_to_block(9); + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 2), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 
3), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 4), LeaseData { leaser: 5, amount: 5 }), + ] + ); + }); + } + + #[test] + fn multiple_bids_work_post_ending() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1)); + + for i in 1..6u64 { + run_to_block(((i - 1) / 2 + 1) as _); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); + for j in 1..6 { + assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 }); + assert_eq!(Balances::free_balance(j), if j <= i { j * 9 } else { j * 10 }); + } + } + for i in 1..6u64 { + assert_eq!(ReservedAmounts::::get((i, ParaId::from(0))).unwrap(), i); + } + + run_to_block(5); + assert_eq!( + leases(), + (1..=4) + .map(|i| ((0.into(), i), LeaseData { leaser: 2, amount: 2 })) + .collect::>() + ); + }); + } + + #[test] + fn incomplete_calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ThreeThree as u8 as usize] = Some((1, 0.into(), 1)); + + let winners = vec![(1, 0.into(), 1, SlotRange::ThreeThree)]; + + assert_eq!(Auctions::calculate_winners(winning), winners); + } + + #[test] + fn first_incomplete_calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[0] = Some((1, 0.into(), 1)); + + let winners = vec![(1, 0.into(), 1, SlotRange::ZeroZero)]; + + assert_eq!(Auctions::calculate_winners(winning), winners); + } + + #[test] + fn calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroZero as u8 as usize] = Some((2, 0.into(), 2)); + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 1)); + winning[SlotRange::OneOne as u8 as usize] = Some((3, 1.into(), 1)); + winning[SlotRange::TwoTwo as u8 as usize] = Some((1, 2.into(), 53)); + winning[SlotRange::ThreeThree as u8 as usize] = Some((5, 3.into(), 1)); + + let winners = vec![ + (2, 0.into(), 2, SlotRange::ZeroZero), + (3, 
1.into(), 1, SlotRange::OneOne), + (1, 2.into(), 53, SlotRange::TwoTwo), + (5, 3.into(), 1, SlotRange::ThreeThree), + ]; + assert_eq!(Auctions::calculate_winners(winning), winners); + + winning[SlotRange::ZeroOne as u8 as usize] = Some((4, 10.into(), 3)); + let winners = vec![ + (4, 10.into(), 3, SlotRange::ZeroOne), + (1, 2.into(), 53, SlotRange::TwoTwo), + (5, 3.into(), 1, SlotRange::ThreeThree), + ]; + assert_eq!(Auctions::calculate_winners(winning), winners); + + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 100)); + let winners = vec![(1, 100.into(), 100, SlotRange::ZeroThree)]; + assert_eq!(Auctions::calculate_winners(winning), winners); + } + + #[test] + fn lower_bids_are_correctly_refunded() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + + // Make a bid and reserve a balance + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); + assert_eq!(Balances::reserved_balance(1), 9); + assert_eq!(ReservedAmounts::::get((1, para_1)), Some(9)); + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(ReservedAmounts::::get((2, para_2)), None); + + // Bigger bid, reserves new balance and returns funds + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 1, 4, 19)); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(ReservedAmounts::::get((1, para_1)), None); + assert_eq!(Balances::reserved_balance(2), 19); + assert_eq!(ReservedAmounts::::get((2, para_2)), Some(19)); + }); + } + + #[test] + fn initialize_winners_in_ending_period_works() { + new_test_ext().execute_with(|| { + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + let para_3 = ParaId::from(3_u32); + + // Make bids + 
assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 3, 4, 19)); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); + winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(11); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + assert_eq!(Winning::::get(1), Some(winning)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29)); + + run_to_block(12); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(2), Some(winning)); + }); + } + + #[test] + fn handle_bid_requires_registered_para() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1), + Error::::ParaNotRegistered + ); + assert_ok!(TestRegistrar::::register( + 1, + 1337.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1)); + }); + } + + #[test] + fn handle_bid_checks_existing_lease_periods() { + new_test_ext().execute_with(|| { + run_to_block(1); + 
assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + + // Para 1 just won an auction above and won some lease periods. + // No bids can work which overlap these periods. + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 4, 1), + Error::::AlreadyLeasedOut, + ); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 2, 1), + Error::::AlreadyLeasedOut, + ); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 3, 4, 1), + Error::::AlreadyLeasedOut, + ); + // This is okay, not an overlapping bid. + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 1, 1)); + }); + } + + // Here we will test that taking only 10 samples during the ending period works as expected. 
+ #[test] + fn less_winning_samples_work() { + new_test_ext().execute_with(|| { + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); + EndingPeriod::set(30); + SampleLength::set(10); + + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + let para_3 = ParaId::from(3_u32); + + // Make bids + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 11, 14, 9)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 13, 14, 19)); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); + winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + assert_eq!(Winning::::get(0), Some(winning)); + + // New bids update the current winning + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 14, 14, 29)); + winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(20); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + assert_eq!(Winning::::get(1), Some(winning)); + run_to_block(25); + // Overbid mid sample + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29)); + winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(1), Some(winning)); + + run_to_block(30); + assert_eq!( + Auctions::auction_status(System::block_number()), + 
AuctionStatus::::EndingPeriod(2, 0) + ); + assert_eq!(Winning::::get(2), Some(winning)); + + set_last_random(H256::from([254; 32]), 40); + run_to_block(40); + // Auction ended and winner selected + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + assert_eq!( + leases(), + vec![ + ((3.into(), 13), LeaseData { leaser: 3, amount: 29 }), + ((3.into(), 14), LeaseData { leaser: 3, amount: 29 }), + ] + ); + }); + } + + #[test] + fn auction_status_works() { + new_test_ext().execute_with(|| { + EndingPeriod::set(30); + SampleLength::set(10); + set_last_random(dummy_hash(), 0); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + + run_to_block(11); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 1) + ); + + run_to_block(19); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 9) + ); + + run_to_block(20); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + + run_to_block(25); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 5) + ); + + run_to_block(30); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + + run_to_block(39); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 9) + ); + + run_to_block(40); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(0) + ); + + 
run_to_block(44); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(4) + ); + + set_last_random(dummy_hash(), 45); + run_to_block(45); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); + } + + #[test] + fn can_cancel_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + + assert_noop!(Auctions::cancel_auction(RuntimeOrigin::signed(6)), BadOrigin); + assert_ok!(Auctions::cancel_auction(RuntimeOrigin::root())); + + assert!(AuctionInfo::::get().is_none()); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(ReservedAmounts::::iter().count(), 0); + assert_eq!(Winning::::iter().count(), 0); + }); + } +} + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking { + use super::{Pallet as Auctions, *}; + use frame_support::{ + assert_ok, + traits::{EnsureOrigin, OnInitialize}, + }; + use frame_system::RawOrigin; + use polkadot_runtime_parachains::paras; + use sp_runtime::{traits::Bounded, SaturatedConversion}; + + use frame_benchmarking::{account, benchmarks, whitelisted_caller, BenchmarkError}; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + fn fill_winners(lease_period_index: LeasePeriodOf) { + let auction_index = AuctionCounter::::get(); + let minimum_balance = CurrencyOf::::minimum_balance(); + + for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { + let owner = account("owner", n, 0); + let worst_validation_code = T::Registrar::worst_validation_code(); + let worst_head_data = T::Registrar::worst_head_data(); + CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); + + assert!(T::Registrar::register( + owner, + ParaId::from(n), + worst_head_data, + worst_validation_code + ) + .is_ok()); + } + assert_ok!(paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + T::Registrar::worst_validation_code(), + )); + + T::Registrar::execute_pending_transitions(); + + for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { + let bidder = account("bidder", n, 0); + CurrencyOf::::make_free_balance_be(&bidder, BalanceOf::::max_value()); + + let slot_range = SlotRange::n((n - 1) as u8).unwrap(); + let (start, end) = slot_range.as_pair(); + + assert!(Auctions::::bid( + RawOrigin::Signed(bidder).into(), + ParaId::from(n), + auction_index, + lease_period_index + start.into(), // First Slot + lease_period_index + end.into(), // Last slot + minimum_balance.saturating_mul(n.into()), // Amount + ) + .is_ok()); + } + } + + benchmarks! 
{ + where_clause { where T: pallet_babe::Config + paras::Config } + + new_auction { + let duration = BlockNumberFor::::max_value(); + let lease_period_index = LeasePeriodOf::::max_value(); + let origin = + T::InitiateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + }: _(origin, duration, lease_period_index) + verify { + assert_last_event::(Event::::AuctionStarted { + auction_index: AuctionCounter::::get(), + lease_period: LeasePeriodOf::::max_value(), + ending: BlockNumberFor::::max_value(), + }.into()); + } + + // Worst case scenario a new bid comes in which kicks out an existing bid for the same slot. + bid { + // If there is an offset, we need to be on that block to be able to do lease things. + let (_, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration = BlockNumberFor::::max_value(); + let lease_period_index = LeasePeriodOf::::zero(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + let para = ParaId::from(0); + let new_para = ParaId::from(1_u32); + + // Register the paras + let owner = account("owner", 0, 0); + CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); + let worst_head_data = T::Registrar::worst_head_data(); + let worst_validation_code = T::Registrar::worst_validation_code(); + T::Registrar::register(owner.clone(), para, worst_head_data.clone(), worst_validation_code.clone())?; + T::Registrar::register(owner, new_para, worst_head_data, worst_validation_code.clone())?; + assert_ok!(paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + worst_validation_code, + )); + + T::Registrar::execute_pending_transitions(); + + // Make an existing bid + let auction_index = AuctionCounter::::get(); + let first_slot = 
AuctionInfo::::get().unwrap().0; + let last_slot = first_slot + 3u32.into(); + let first_amount = CurrencyOf::::minimum_balance(); + let first_bidder: T::AccountId = account("first_bidder", 0, 0); + CurrencyOf::::make_free_balance_be(&first_bidder, BalanceOf::::max_value()); + Auctions::::bid( + RawOrigin::Signed(first_bidder.clone()).into(), + para, + auction_index, + first_slot, + last_slot, + first_amount, + )?; + + let caller: T::AccountId = whitelisted_caller(); + CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); + let bigger_amount = CurrencyOf::::minimum_balance().saturating_mul(10u32.into()); + assert_eq!(CurrencyOf::::reserved_balance(&first_bidder), first_amount); + }: _(RawOrigin::Signed(caller.clone()), new_para, auction_index, first_slot, last_slot, bigger_amount) + verify { + // Confirms that we unreserved funds from a previous bidder, which is worst case scenario. + assert_eq!(CurrencyOf::::reserved_balance(&caller), bigger_amount); + } + + // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for auction end. + // Entire winner map should be full and removed at the end of the benchmark. + on_initialize { + // If there is an offset, we need to be on that block to be able to do lease things. 
+ let (lease_length, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration: BlockNumberFor = lease_length / 2u32.into(); + let lease_period_index = LeasePeriodOf::::zero(); + let now = frame_system::Pallet::::block_number(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + fill_winners::(lease_period_index); + + for winner in Winning::::get(BlockNumberFor::::from(0u32)).unwrap().iter() { + assert!(winner.is_some()); + } + + let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); + // Make winning map full + for i in 0u32 .. (T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { + Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); + } + + // Move ahead to the block we want to initialize + frame_system::Pallet::::set_block_number(duration + now + T::EndingPeriod::get()); + + // Trigger epoch change for new random number value: + { + pallet_babe::EpochStart::::set((Zero::zero(), u32::MAX.into())); + pallet_babe::Pallet::::on_initialize(duration + now + T::EndingPeriod::get()); + let authorities = pallet_babe::Pallet::::authorities(); + // Check for non empty authority set since it otherwise emits a No-OP warning. + if !authorities.is_empty() { + pallet_babe::Pallet::::enact_epoch_change(authorities.clone(), authorities, None); + } + } + + }: { + Auctions::::on_initialize(duration + now + T::EndingPeriod::get()); + } verify { + let auction_index = AuctionCounter::::get(); + assert_last_event::(Event::::AuctionClosed { auction_index }.into()); + assert!(Winning::::iter().count().is_zero()); + } + + // Worst case: 10 bidders taking all wining spots, and winning data is full. 
+ cancel_auction { + // If there is an offset, we need to be on that block to be able to do lease things. + let (lease_length, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration: BlockNumberFor = lease_length / 2u32.into(); + let lease_period_index = LeasePeriodOf::::zero(); + let now = frame_system::Pallet::::block_number(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + fill_winners::(lease_period_index); + + let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); + for winner in winning_data.iter() { + assert!(winner.is_some()); + } + + // Make winning map full + for i in 0u32 .. (T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { + Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); + } + assert!(AuctionInfo::::get().is_some()); + }: _(RawOrigin::Root) + verify { + assert!(AuctionInfo::::get().is_none()); + } + + impl_benchmark_test_suite!( + Auctions, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); + } +} diff --git a/polkadot/runtime/common/src/auctions/benchmarking.rs b/polkadot/runtime/common/src/auctions/benchmarking.rs deleted file mode 100644 index 6d52cd850b6f..000000000000 --- a/polkadot/runtime/common/src/auctions/benchmarking.rs +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Benchmarking for auctions pallet - -#![cfg(feature = "runtime-benchmarks")] -use super::{Pallet as Auctions, *}; -use frame_support::{ - assert_ok, - traits::{EnsureOrigin, OnInitialize}, -}; -use frame_system::RawOrigin; -use polkadot_runtime_parachains::paras; -use sp_runtime::{traits::Bounded, SaturatedConversion}; - -use frame_benchmarking::v2::*; - -fn assert_last_event(generic_event: ::RuntimeEvent) { - let events = frame_system::Pallet::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); - // compare to the last event record - let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; - assert_eq!(event, &system_event); -} - -fn fill_winners(lease_period_index: LeasePeriodOf) { - let auction_index = AuctionCounter::::get(); - let minimum_balance = CurrencyOf::::minimum_balance(); - - for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { - let owner = account("owner", n, 0); - let worst_validation_code = T::Registrar::worst_validation_code(); - let worst_head_data = T::Registrar::worst_head_data(); - CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); - - assert!(T::Registrar::register( - owner, - ParaId::from(n), - worst_head_data, - worst_validation_code - ) - .is_ok()); - } - assert_ok!(paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - T::Registrar::worst_validation_code(), - )); - - T::Registrar::execute_pending_transitions(); - - for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { - let bidder = account("bidder", n, 0); - CurrencyOf::::make_free_balance_be(&bidder, BalanceOf::::max_value()); - - let slot_range = 
SlotRange::n((n - 1) as u8).unwrap(); - let (start, end) = slot_range.as_pair(); - - assert!(Auctions::::bid( - RawOrigin::Signed(bidder).into(), - ParaId::from(n), - auction_index, - lease_period_index + start.into(), // First Slot - lease_period_index + end.into(), // Last slot - minimum_balance.saturating_mul(n.into()), // Amount - ) - .is_ok()); - } -} - -#[benchmarks( - where T: pallet_babe::Config + paras::Config, - )] -mod benchmarks { - use super::*; - - #[benchmark] - fn new_auction() -> Result<(), BenchmarkError> { - let duration = BlockNumberFor::::max_value(); - let lease_period_index = LeasePeriodOf::::max_value(); - let origin = - T::InitiateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, duration, lease_period_index); - - assert_last_event::( - Event::::AuctionStarted { - auction_index: AuctionCounter::::get(), - lease_period: LeasePeriodOf::::max_value(), - ending: BlockNumberFor::::max_value(), - } - .into(), - ); - - Ok(()) - } - - // Worst case scenario a new bid comes in which kicks out an existing bid for the same slot. - #[benchmark] - fn bid() -> Result<(), BenchmarkError> { - // If there is an offset, we need to be on that block to be able to do lease things. 
- let (_, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration = BlockNumberFor::::max_value(); - let lease_period_index = LeasePeriodOf::::zero(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - let para = ParaId::from(0); - let new_para = ParaId::from(1_u32); - - // Register the paras - let owner = account("owner", 0, 0); - CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); - let worst_head_data = T::Registrar::worst_head_data(); - let worst_validation_code = T::Registrar::worst_validation_code(); - T::Registrar::register( - owner.clone(), - para, - worst_head_data.clone(), - worst_validation_code.clone(), - )?; - T::Registrar::register(owner, new_para, worst_head_data, worst_validation_code.clone())?; - assert_ok!(paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - worst_validation_code, - )); - - T::Registrar::execute_pending_transitions(); - - // Make an existing bid - let auction_index = AuctionCounter::::get(); - let first_slot = AuctionInfo::::get().unwrap().0; - let last_slot = first_slot + 3u32.into(); - let first_amount = CurrencyOf::::minimum_balance(); - let first_bidder: T::AccountId = account("first_bidder", 0, 0); - CurrencyOf::::make_free_balance_be(&first_bidder, BalanceOf::::max_value()); - Auctions::::bid( - RawOrigin::Signed(first_bidder.clone()).into(), - para, - auction_index, - first_slot, - last_slot, - first_amount, - )?; - - let caller: T::AccountId = whitelisted_caller(); - CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); - let bigger_amount = CurrencyOf::::minimum_balance().saturating_mul(10u32.into()); - assert_eq!(CurrencyOf::::reserved_balance(&first_bidder), first_amount); - - #[extrinsic_call] - _( - 
RawOrigin::Signed(caller.clone()), - new_para, - auction_index, - first_slot, - last_slot, - bigger_amount, - ); - - // Confirms that we unreserved funds from a previous bidder, which is worst case - // scenario. - assert_eq!(CurrencyOf::::reserved_balance(&caller), bigger_amount); - - Ok(()) - } - - // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for - // auction end. Entire winner map should be full and removed at the end of the benchmark. - #[benchmark] - fn on_initialize() -> Result<(), BenchmarkError> { - // If there is an offset, we need to be on that block to be able to do lease things. - let (lease_length, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration: BlockNumberFor = lease_length / 2u32.into(); - let lease_period_index = LeasePeriodOf::::zero(); - let now = frame_system::Pallet::::block_number(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - fill_winners::(lease_period_index); - - for winner in Winning::::get(BlockNumberFor::::from(0u32)).unwrap().iter() { - assert!(winner.is_some()); - } - - let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); - // Make winning map full - for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { - Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); - } - - // Move ahead to the block we want to initialize - frame_system::Pallet::::set_block_number(duration + now + T::EndingPeriod::get()); - - // Trigger epoch change for new random number value: - { - pallet_babe::EpochStart::::set((Zero::zero(), u32::MAX.into())); - pallet_babe::Pallet::::on_initialize(duration + now + T::EndingPeriod::get()); - let authorities = pallet_babe::Pallet::::authorities(); - // 
Check for non empty authority set since it otherwise emits a No-OP warning. - if !authorities.is_empty() { - pallet_babe::Pallet::::enact_epoch_change( - authorities.clone(), - authorities, - None, - ); - } - } - - #[block] - { - Auctions::::on_initialize(duration + now + T::EndingPeriod::get()); - } - - let auction_index = AuctionCounter::::get(); - assert_last_event::(Event::::AuctionClosed { auction_index }.into()); - assert!(Winning::::iter().count().is_zero()); - - Ok(()) - } - - // Worst case: 10 bidders taking all wining spots, and winning data is full. - #[benchmark] - fn cancel_auction() -> Result<(), BenchmarkError> { - // If there is an offset, we need to be on that block to be able to do lease things. - let (lease_length, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration: BlockNumberFor = lease_length / 2u32.into(); - let lease_period_index = LeasePeriodOf::::zero(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - fill_winners::(lease_period_index); - - let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); - for winner in winning_data.iter() { - assert!(winner.is_some()); - } - - // Make winning map full - for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { - Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); - } - assert!(AuctionInfo::::get().is_some()); - - #[extrinsic_call] - _(RawOrigin::Root); - - assert!(AuctionInfo::::get().is_none()); - Ok(()) - } - - impl_benchmark_test_suite!( - Auctions, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); -} diff --git a/polkadot/runtime/common/src/auctions/mock.rs b/polkadot/runtime/common/src/auctions/mock.rs deleted file mode 100644 index 
9fe19e579cfa..000000000000 --- a/polkadot/runtime/common/src/auctions/mock.rs +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Mocking utilities for testing in auctions pallet. - -#[cfg(test)] -use super::*; -use crate::{auctions, mock::TestRegistrar}; -use frame_support::{ - assert_ok, derive_impl, ord_parameter_types, parameter_types, - traits::{EitherOfDiverse, OnFinalize, OnInitialize}, -}; -use frame_system::{EnsureRoot, EnsureSignedBy}; -use pallet_balances; -use polkadot_primitives::{BlockNumber, Id as ParaId}; -use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, -}; -use std::{cell::RefCell, collections::BTreeMap}; - -type Block = frame_system::mocking::MockBlockU32; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Auctions: auctions, - } -); - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing 
= BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type AccountStore = System; -} - -#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] -pub struct LeaseData { - pub leaser: u64, - pub amount: u64, -} - -thread_local! { - pub static LEASES: - RefCell> = RefCell::new(BTreeMap::new()); -} - -pub fn leases() -> Vec<((ParaId, BlockNumber), LeaseData)> { - LEASES.with(|p| (&*p.borrow()).clone().into_iter().collect::>()) -} - -pub struct TestLeaser; -impl Leaser for TestLeaser { - type AccountId = u64; - type LeasePeriod = BlockNumber; - type Currency = Balances; - - fn lease_out( - para: ParaId, - leaser: &Self::AccountId, - amount: >::Balance, - period_begin: Self::LeasePeriod, - period_count: Self::LeasePeriod, - ) -> Result<(), LeaseError> { - LEASES.with(|l| { - let mut leases = l.borrow_mut(); - let now = System::block_number(); - let (current_lease_period, _) = - Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; - if period_begin < current_lease_period { - return Err(LeaseError::AlreadyEnded); - } - for period in period_begin..(period_begin + period_count) { - if leases.contains_key(&(para, period)) { - return Err(LeaseError::AlreadyLeased); - } - leases.insert((para, period), LeaseData { leaser: *leaser, amount }); - } - Ok(()) - }) - } - - fn deposit_held( - para: ParaId, - leaser: &Self::AccountId, - ) -> >::Balance { - leases() - .iter() - .filter_map(|((id, _period), data)| { - if id == ¶ && &data.leaser == leaser { - Some(data.amount) - } else { - None 
- } - }) - .max() - .unwrap_or_default() - } - - fn lease_period_length() -> (BlockNumber, BlockNumber) { - (10, 0) - } - - fn lease_period_index(b: BlockNumber) -> Option<(Self::LeasePeriod, bool)> { - let (lease_period_length, offset) = Self::lease_period_length(); - let b = b.checked_sub(offset)?; - - let lease_period = b / lease_period_length; - let first_block = (b % lease_period_length).is_zero(); - - Some((lease_period, first_block)) - } - - fn already_leased( - para_id: ParaId, - first_period: Self::LeasePeriod, - last_period: Self::LeasePeriod, - ) -> bool { - leases().into_iter().any(|((para, period), _data)| { - para == para_id && first_period <= period && period <= last_period - }) - } -} - -ord_parameter_types! { - pub const Six: u64 = 6; -} - -type RootOrSix = EitherOfDiverse, EnsureSignedBy>; - -thread_local! { - pub static LAST_RANDOM: RefCell> = RefCell::new(None); -} -pub fn set_last_random(output: H256, known_since: u32) { - LAST_RANDOM.with(|p| *p.borrow_mut() = Some((output, known_since))) -} -pub struct TestPastRandomness; -impl Randomness for TestPastRandomness { - fn random(_subject: &[u8]) -> (H256, u32) { - LAST_RANDOM.with(|p| { - if let Some((output, known_since)) = &*p.borrow() { - (*output, *known_since) - } else { - (H256::zero(), frame_system::Pallet::::block_number()) - } - }) - } -} - -parameter_types! { - pub static EndingPeriod: BlockNumber = 3; - pub static SampleLength: BlockNumber = 1; -} - -impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Leaser = TestLeaser; - type Registrar = TestRegistrar; - type EndingPeriod = EndingPeriod; - type SampleLength = SampleLength; - type Randomness = TestPastRandomness; - type InitiateOrigin = RootOrSix; - type WeightInfo = crate::auctions::TestWeightInfo; -} - -// This function basically just builds a genesis storage key/value store according to -// our desired mock up. 
-pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext: sp_io::TestExternalities = t.into(); - ext.execute_with(|| { - // Register para 0, 1, 2, and 3 for tests - assert_ok!(TestRegistrar::::register( - 1, - 0.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 1.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 2.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 3.into(), - dummy_head_data(), - dummy_validation_code() - )); - }); - ext -} - -pub fn run_to_block(n: BlockNumber) { - while System::block_number() < n { - Auctions::on_finalize(System::block_number()); - Balances::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Balances::on_initialize(System::block_number()); - Auctions::on_initialize(System::block_number()); - } -} diff --git a/polkadot/runtime/common/src/auctions/mod.rs b/polkadot/runtime/common/src/auctions/mod.rs deleted file mode 100644 index 84d8a3846d40..000000000000 --- a/polkadot/runtime/common/src/auctions/mod.rs +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Auctioning system to determine the set of Parachains in operation. This includes logic for the -//! auctioning mechanism and for reserving balance as part of the "payment". Unreserving the balance -//! happens elsewhere. - -use crate::{ - slot_range::SlotRange, - traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, -}; -use alloc::{vec, vec::Vec}; -use codec::Decode; -use core::mem::swap; -use frame_support::{ - dispatch::DispatchResult, - ensure, - traits::{Currency, Get, Randomness, ReservableCurrency}, - weights::Weight, -}; -use frame_system::pallet_prelude::BlockNumberFor; -pub use pallet::*; -use polkadot_primitives::Id as ParaId; -use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; - -type CurrencyOf = <::Leaser as Leaser>>::Currency; -type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< - ::AccountId, ->>::Balance; - -pub trait WeightInfo { - fn new_auction() -> Weight; - fn bid() -> Weight; - fn cancel_auction() -> Weight; - fn on_initialize() -> Weight; -} - -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn new_auction() -> Weight { - Weight::zero() - } - fn bid() -> Weight { - Weight::zero() - } - fn cancel_auction() -> Weight { - Weight::zero() - } - fn on_initialize() -> Weight { - Weight::zero() - } -} - -/// An auction index. We count auctions in this type. -pub type AuctionIndex = u32; - -type LeasePeriodOf = <::Leaser as Leaser>>::LeasePeriod; - -// Winning data type. This encodes the top bidders of each range together with their bid. 
-type WinningData = [Option<(::AccountId, ParaId, BalanceOf)>; - SlotRange::SLOT_RANGE_COUNT]; -// Winners data type. This encodes each of the final winners of a parachain auction, the parachain -// index assigned to them, their winning bid and the range that they won. -type WinnersData = - Vec<(::AccountId, ParaId, BalanceOf, SlotRange)>; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::{dispatch::DispatchClass, pallet_prelude::*, traits::EnsureOrigin}; - use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - - #[pallet::pallet] - pub struct Pallet(_); - - /// The module's configuration trait. - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The type representing the leasing system. - type Leaser: Leaser< - BlockNumberFor, - AccountId = Self::AccountId, - LeasePeriod = BlockNumberFor, - >; - - /// The parachain registrar type. - type Registrar: Registrar; - - /// The number of blocks over which an auction may be retroactively ended. - #[pallet::constant] - type EndingPeriod: Get>; - - /// The length of each sample to take during the ending period. - /// - /// `EndingPeriod` / `SampleLength` = Total # of Samples - #[pallet::constant] - type SampleLength: Get>; - - /// Something that provides randomness in the runtime. - type Randomness: Randomness>; - - /// The origin which may initiate auctions. - type InitiateOrigin: EnsureOrigin; - - /// Weight Information for the Extrinsics in the Pallet - type WeightInfo: WeightInfo; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// An auction started. Provides its index and the block number where it will begin to - /// close and the first lease period of the quadruplet that is auctioned. 
- AuctionStarted { - auction_index: AuctionIndex, - lease_period: LeasePeriodOf, - ending: BlockNumberFor, - }, - /// An auction ended. All funds become unreserved. - AuctionClosed { auction_index: AuctionIndex }, - /// Funds were reserved for a winning bid. First balance is the extra amount reserved. - /// Second is the total. - Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, - /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` - Unreserved { bidder: T::AccountId, amount: BalanceOf }, - /// Someone attempted to lease the same slot twice for a parachain. The amount is held in - /// reserve but no parachain slot has been leased. - ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, - /// A new bid has been accepted as the current winner. - BidAccepted { - bidder: T::AccountId, - para_id: ParaId, - amount: BalanceOf, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - }, - /// The winning offset was chosen for an auction. This will map into the `Winning` storage - /// map. - WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, - } - - #[pallet::error] - pub enum Error { - /// This auction is already in progress. - AuctionInProgress, - /// The lease period is in the past. - LeasePeriodInPast, - /// Para is not registered - ParaNotRegistered, - /// Not a current auction. - NotCurrentAuction, - /// Not an auction. - NotAuction, - /// Auction has already ended. - AuctionEnded, - /// The para is already leased out for part of this range. - AlreadyLeasedOut, - } - - /// Number of auctions started so far. - #[pallet::storage] - pub type AuctionCounter = StorageValue<_, AuctionIndex, ValueQuery>; - - /// Information relating to the current auction, if there is one. - /// - /// The first item in the tuple is the lease period index that the first of the four - /// contiguous lease periods on auction is for. 
The second is the block number when the - /// auction will "begin to end", i.e. the first block of the Ending Period of the auction. - #[pallet::storage] - pub type AuctionInfo = StorageValue<_, (LeasePeriodOf, BlockNumberFor)>; - - /// Amounts currently reserved in the accounts of the bidders currently winning - /// (sub-)ranges. - #[pallet::storage] - pub type ReservedAmounts = - StorageMap<_, Twox64Concat, (T::AccountId, ParaId), BalanceOf>; - - /// The winning bids for each of the 10 ranges at each sample in the final Ending Period of - /// the current auction. The map's key is the 0-based index into the Sample Size. The - /// first sample of the ending period is 0; the last is `Sample Size - 1`. - #[pallet::storage] - pub type Winning = StorageMap<_, Twox64Concat, BlockNumberFor, WinningData>; - - #[pallet::extra_constants] - impl Pallet { - #[pallet::constant_name(SlotRangeCount)] - fn slot_range_count() -> u32 { - SlotRange::SLOT_RANGE_COUNT as u32 - } - - #[pallet::constant_name(LeasePeriodsPerSlot)] - fn lease_periods_per_slot() -> u32 { - SlotRange::LEASE_PERIODS_PER_SLOT as u32 - } - } - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(n: BlockNumberFor) -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - // If the current auction was in its ending period last block, then ensure that the - // (sub-)range winner information is duplicated from the previous block in case no bids - // happened in the last block. - if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { - weight = weight.saturating_add(T::DbWeight::get().reads(1)); - if !Winning::::contains_key(&offset) { - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - let winning_data = offset - .checked_sub(&One::one()) - .and_then(Winning::::get) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - Winning::::insert(offset, winning_data); - } - } - - // Check to see if an auction just ended. 
- if let Some((winning_ranges, auction_lease_period_index)) = Self::check_auction_end(n) { - // Auction is ended now. We have the winning ranges and the lease period index which - // acts as the offset. Handle it. - Self::manage_auction_end(auction_lease_period_index, winning_ranges); - weight = weight.saturating_add(T::WeightInfo::on_initialize()); - } - - weight - } - } - - #[pallet::call] - impl Pallet { - /// Create a new auction. - /// - /// This can only happen when there isn't already an auction in progress and may only be - /// called by the root origin. Accepts the `duration` of this auction and the - /// `lease_period_index` of the initial lease period of the four that are to be auctioned. - #[pallet::call_index(0)] - #[pallet::weight((T::WeightInfo::new_auction(), DispatchClass::Operational))] - pub fn new_auction( - origin: OriginFor, - #[pallet::compact] duration: BlockNumberFor, - #[pallet::compact] lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - T::InitiateOrigin::ensure_origin(origin)?; - Self::do_new_auction(duration, lease_period_index) - } - - /// Make a new bid from an account (including a parachain account) for deploying a new - /// parachain. - /// - /// Multiple simultaneous bids from the same bidder are allowed only as long as all active - /// bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted. - /// - /// - `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and - /// funded by) the same account. - /// - `auction_index` is the index of the auction to bid on. Should just be the present - /// value of `AuctionCounter`. - /// - `first_slot` is the first lease period index of the range to bid on. This is the - /// absolute lease period index value, not an auction-specific offset. - /// - `last_slot` is the last lease period index of the range to bid on. This is the - /// absolute lease period index value, not an auction-specific offset. 
- /// - `amount` is the amount to bid to be held as deposit for the parachain should the - /// bid win. This amount is held throughout the range. - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::bid())] - pub fn bid( - origin: OriginFor, - #[pallet::compact] para: ParaId, - #[pallet::compact] auction_index: AuctionIndex, - #[pallet::compact] first_slot: LeasePeriodOf, - #[pallet::compact] last_slot: LeasePeriodOf, - #[pallet::compact] amount: BalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::handle_bid(who, para, auction_index, first_slot, last_slot, amount)?; - Ok(()) - } - - /// Cancel an in-progress auction. - /// - /// Can only be called by Root origin. - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::cancel_auction())] - pub fn cancel_auction(origin: OriginFor) -> DispatchResult { - ensure_root(origin)?; - // Unreserve all bids. - for ((bidder, _), amount) in ReservedAmounts::::drain() { - CurrencyOf::::unreserve(&bidder, amount); - } - #[allow(deprecated)] - Winning::::remove_all(None); - AuctionInfo::::kill(); - Ok(()) - } - } -} - -impl Auctioneer> for Pallet { - type AccountId = T::AccountId; - type LeasePeriod = BlockNumberFor; - type Currency = CurrencyOf; - - fn new_auction( - duration: BlockNumberFor, - lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - Self::do_new_auction(duration, lease_period_index) - } - - // Returns the status of the auction given the current block number. 
- fn auction_status(now: BlockNumberFor) -> AuctionStatus> { - let early_end = match AuctionInfo::::get() { - Some((_, early_end)) => early_end, - None => return AuctionStatus::NotStarted, - }; - - let after_early_end = match now.checked_sub(&early_end) { - Some(after_early_end) => after_early_end, - None => return AuctionStatus::StartingPeriod, - }; - - let ending_period = T::EndingPeriod::get(); - if after_early_end < ending_period { - let sample_length = T::SampleLength::get().max(One::one()); - let sample = after_early_end / sample_length; - let sub_sample = after_early_end % sample_length; - return AuctionStatus::EndingPeriod(sample, sub_sample) - } else { - // This is safe because of the comparison operator above - return AuctionStatus::VrfDelay(after_early_end - ending_period) - } - } - - fn place_bid( - bidder: T::AccountId, - para: ParaId, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - amount: BalanceOf, - ) -> DispatchResult { - Self::handle_bid(bidder, para, AuctionCounter::::get(), first_slot, last_slot, amount) - } - - fn lease_period_index(b: BlockNumberFor) -> Option<(Self::LeasePeriod, bool)> { - T::Leaser::lease_period_index(b) - } - - #[cfg(any(feature = "runtime-benchmarks", test))] - fn lease_period_length() -> (BlockNumberFor, BlockNumberFor) { - T::Leaser::lease_period_length() - } - - fn has_won_an_auction(para: ParaId, bidder: &T::AccountId) -> bool { - !T::Leaser::deposit_held(para, bidder).is_zero() - } -} - -impl Pallet { - // A trick to allow me to initialize large arrays with nothing in them. - const EMPTY: Option<(::AccountId, ParaId, BalanceOf)> = None; - - /// Create a new auction. - /// - /// This can only happen when there isn't already an auction in progress. Accepts the `duration` - /// of this auction and the `lease_period_index` of the initial lease period of the four that - /// are to be auctioned. 
- fn do_new_auction( - duration: BlockNumberFor, - lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - let maybe_auction = AuctionInfo::::get(); - ensure!(maybe_auction.is_none(), Error::::AuctionInProgress); - let now = frame_system::Pallet::::block_number(); - if let Some((current_lease_period, _)) = T::Leaser::lease_period_index(now) { - // If there is no active lease period, then we don't need to make this check. - ensure!(lease_period_index >= current_lease_period, Error::::LeasePeriodInPast); - } - - // Bump the counter. - let n = AuctionCounter::::mutate(|n| { - *n += 1; - *n - }); - - // Set the information. - let ending = frame_system::Pallet::::block_number().saturating_add(duration); - AuctionInfo::::put((lease_period_index, ending)); - - Self::deposit_event(Event::::AuctionStarted { - auction_index: n, - lease_period: lease_period_index, - ending, - }); - Ok(()) - } - - /// Actually place a bid in the current auction. - /// - /// - `bidder`: The account that will be funding this bid. - /// - `auction_index`: The auction index of the bid. For this to succeed, must equal - /// the current value of `AuctionCounter`. - /// - `first_slot`: The first lease period index of the range to be bid on. - /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). - /// - `amount`: The total amount to be the bid for deposit over the range. - pub fn handle_bid( - bidder: T::AccountId, - para: ParaId, - auction_index: u32, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - amount: BalanceOf, - ) -> DispatchResult { - // Ensure para is registered before placing a bid on it. - ensure!(T::Registrar::is_registered(para), Error::::ParaNotRegistered); - // Bidding on latest auction. - ensure!(auction_index == AuctionCounter::::get(), Error::::NotCurrentAuction); - // Assume it's actually an auction (this should never fail because of above). 
- let (first_lease_period, _) = AuctionInfo::::get().ok_or(Error::::NotAuction)?; - - // Get the auction status and the current sample block. For the starting period, the sample - // block is zero. - let auction_status = Self::auction_status(frame_system::Pallet::::block_number()); - // The offset into the ending samples of the auction. - let offset = match auction_status { - AuctionStatus::NotStarted => return Err(Error::::AuctionEnded.into()), - AuctionStatus::StartingPeriod => Zero::zero(), - AuctionStatus::EndingPeriod(o, _) => o, - AuctionStatus::VrfDelay(_) => return Err(Error::::AuctionEnded.into()), - }; - - // We also make sure that the bid is not for any existing leases the para already has. - ensure!( - !T::Leaser::already_leased(para, first_slot, last_slot), - Error::::AlreadyLeasedOut - ); - - // Our range. - let range = SlotRange::new_bounded(first_lease_period, first_slot, last_slot)?; - // Range as an array index. - let range_index = range as u8 as usize; - - // The current winning ranges. - let mut current_winning = Winning::::get(offset) - .or_else(|| offset.checked_sub(&One::one()).and_then(Winning::::get)) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - - // If this bid beat the previous winner of our range. - if current_winning[range_index].as_ref().map_or(true, |last| amount > last.2) { - // Ok; we are the new winner of this range - reserve the additional amount and record. - - // Get the amount already held on deposit if this is a renewal bid (i.e. there's - // an existing lease on the same para by the same leaser). - let existing_lease_deposit = T::Leaser::deposit_held(para, &bidder); - let reserve_required = amount.saturating_sub(existing_lease_deposit); - - // Get the amount already reserved in any prior and still active bids by us. - let bidder_para = (bidder.clone(), para); - let already_reserved = ReservedAmounts::::get(&bidder_para).unwrap_or_default(); - - // If these don't already cover the bid... 
- if let Some(additional) = reserve_required.checked_sub(&already_reserved) { - // ...then reserve some more funds from their account, failing if there's not - // enough funds. - CurrencyOf::::reserve(&bidder, additional)?; - // ...and record the amount reserved. - ReservedAmounts::::insert(&bidder_para, reserve_required); - - Self::deposit_event(Event::::Reserved { - bidder: bidder.clone(), - extra_reserved: additional, - total_amount: reserve_required, - }); - } - - // Return any funds reserved for the previous winner if we are not in the ending period - // and they no longer have any active bids. - let mut outgoing_winner = Some((bidder.clone(), para, amount)); - swap(&mut current_winning[range_index], &mut outgoing_winner); - if let Some((who, para, _amount)) = outgoing_winner { - if auction_status.is_starting() && - current_winning - .iter() - .filter_map(Option::as_ref) - .all(|&(ref other, other_para, _)| other != &who || other_para != para) - { - // Previous bidder is no longer winning any ranges: unreserve their funds. - if let Some(amount) = ReservedAmounts::::take(&(who.clone(), para)) { - // It really should be reserved; there's not much we can do here on fail. - let err_amt = CurrencyOf::::unreserve(&who, amount); - debug_assert!(err_amt.is_zero()); - Self::deposit_event(Event::::Unreserved { bidder: who, amount }); - } - } - } - - // Update the range winner. - Winning::::insert(offset, ¤t_winning); - Self::deposit_event(Event::::BidAccepted { - bidder, - para_id: para, - amount, - first_slot, - last_slot, - }); - } - Ok(()) - } - - /// Some when the auction's end is known (with the end block number). None if it is unknown. - /// If `Some` then the block number must be at most the previous block and at least the - /// previous block minus `T::EndingPeriod::get()`. - /// - /// This mutates the state, cleaning up `AuctionInfo` and `Winning` in the case of an auction - /// ending. 
An immediately subsequent call with the same argument will always return `None`. - fn check_auction_end(now: BlockNumberFor) -> Option<(WinningData, LeasePeriodOf)> { - if let Some((lease_period_index, early_end)) = AuctionInfo::::get() { - let ending_period = T::EndingPeriod::get(); - let late_end = early_end.saturating_add(ending_period); - let is_ended = now >= late_end; - if is_ended { - // auction definitely ended. - // check to see if we can determine the actual ending point. - let (raw_offset, known_since) = T::Randomness::random(&b"para_auction"[..]); - - if late_end <= known_since { - // Our random seed was known only after the auction ended. Good to use. - let raw_offset_block_number = >::decode( - &mut raw_offset.as_ref(), - ) - .expect("secure hashes should always be bigger than the block number; qed"); - let offset = (raw_offset_block_number % ending_period) / - T::SampleLength::get().max(One::one()); - - let auction_counter = AuctionCounter::::get(); - Self::deposit_event(Event::::WinningOffset { - auction_index: auction_counter, - block_number: offset, - }); - let res = Winning::::get(offset) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - // This `remove_all` statement should remove at most `EndingPeriod` / - // `SampleLength` items, which should be bounded and sensibly configured in the - // runtime. - #[allow(deprecated)] - Winning::::remove_all(None); - AuctionInfo::::kill(); - return Some((res, lease_period_index)) - } - } - } - None - } - - /// Auction just ended. We have the current lease period, the auction's lease period (which - /// is guaranteed to be at least the current period) and the bidders that were winning each - /// range at the time of the auction's close. - fn manage_auction_end( - auction_lease_period_index: LeasePeriodOf, - winning_ranges: WinningData, - ) { - // First, unreserve all amounts that were reserved for the bids. 
We will later re-reserve - // the amounts from the bidders that ended up being assigned the slot so there's no need to - // special-case them here. - for ((bidder, _), amount) in ReservedAmounts::::drain() { - CurrencyOf::::unreserve(&bidder, amount); - } - - // Next, calculate the winning combination of slots and thus the final winners of the - // auction. - let winners = Self::calculate_winners(winning_ranges); - - // Go through those winners and re-reserve their bid, updating our table of deposits - // accordingly. - for (leaser, para, amount, range) in winners.into_iter() { - let begin_offset = LeasePeriodOf::::from(range.as_pair().0 as u32); - let period_begin = auction_lease_period_index + begin_offset; - let period_count = LeasePeriodOf::::from(range.len() as u32); - - match T::Leaser::lease_out(para, &leaser, amount, period_begin, period_count) { - Err(LeaseError::ReserveFailed) | - Err(LeaseError::AlreadyEnded) | - Err(LeaseError::NoLeasePeriod) => { - // Should never happen since we just unreserved this amount (and our offset is - // from the present period). But if it does, there's not much we can do. - }, - Err(LeaseError::AlreadyLeased) => { - // The leaser attempted to get a second lease on the same para ID, possibly - // griefing us. Let's keep the amount reserved and let governance sort it out. - if CurrencyOf::::reserve(&leaser, amount).is_ok() { - Self::deposit_event(Event::::ReserveConfiscated { - para_id: para, - leaser, - amount, - }); - } - }, - Ok(()) => {}, // Nothing to report. - } - } - - Self::deposit_event(Event::::AuctionClosed { - auction_index: AuctionCounter::::get(), - }); - } - - /// Calculate the final winners from the winning slots. 
- /// - /// This is a simple dynamic programming algorithm designed by Al, the original code is at: - /// `https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py` - fn calculate_winners(mut winning: WinningData) -> WinnersData { - let winning_ranges = { - let mut best_winners_ending_at: [(Vec, BalanceOf); - SlotRange::LEASE_PERIODS_PER_SLOT] = Default::default(); - let best_bid = |range: SlotRange| { - winning[range as u8 as usize] - .as_ref() - .map(|(_, _, amount)| *amount * (range.len() as u32).into()) - }; - for i in 0..SlotRange::LEASE_PERIODS_PER_SLOT { - let r = SlotRange::new_bounded(0, 0, i as u32).expect("`i < LPPS`; qed"); - if let Some(bid) = best_bid(r) { - best_winners_ending_at[i] = (vec![r], bid); - } - for j in 0..i { - let r = SlotRange::new_bounded(0, j as u32 + 1, i as u32) - .expect("`i < LPPS`; `j < i`; `j + 1 < LPPS`; qed"); - if let Some(mut bid) = best_bid(r) { - bid += best_winners_ending_at[j].1; - if bid > best_winners_ending_at[i].1 { - let mut new_winners = best_winners_ending_at[j].0.clone(); - new_winners.push(r); - best_winners_ending_at[i] = (new_winners, bid); - } - } else { - if best_winners_ending_at[j].1 > best_winners_ending_at[i].1 { - best_winners_ending_at[i] = best_winners_ending_at[j].clone(); - } - } - } - } - best_winners_ending_at[SlotRange::LEASE_PERIODS_PER_SLOT - 1].0.clone() - }; - - winning_ranges - .into_iter() - .filter_map(|range| { - winning[range as u8 as usize] - .take() - .map(|(bidder, para, amount)| (bidder, para, amount, range)) - }) - .collect::>() - } -} - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; diff --git a/polkadot/runtime/common/src/auctions/tests.rs b/polkadot/runtime/common/src/auctions/tests.rs deleted file mode 100644 index 07574eeb295d..000000000000 --- a/polkadot/runtime/common/src/auctions/tests.rs +++ /dev/null @@ -1,821 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Tests for the auctions pallet. - -#[cfg(test)] -use super::*; -use crate::{auctions::mock::*, mock::TestRegistrar}; -use frame_support::{assert_noop, assert_ok, assert_storage_noop}; -use pallet_balances; -use polkadot_primitives::Id as ParaId; -use polkadot_primitives_test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; -use sp_core::H256; -use sp_runtime::DispatchError::BadOrigin; - -#[test] -fn basic_setup_works() { - new_test_ext().execute_with(|| { - assert_eq!(AuctionCounter::::get(), 0); - assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - - run_to_block(10); - - assert_eq!(AuctionCounter::::get(), 0); - assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); -} - -#[test] -fn can_start_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_eq!(AuctionCounter::::get(), 1); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - }); -} - -#[test] -fn bidding_works() 
{ - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - - assert_eq!(Balances::reserved_balance(1), 5); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!( - Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], - Some((1, 0.into(), 5)) - ); - }); -} - -#[test] -fn under_bidding_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - - assert_storage_noop!({ - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 1)); - }); - }); -} - -#[test] -fn over_bidding_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6)); - - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::reserved_balance(2), 6); - assert_eq!(Balances::free_balance(2), 14); - assert_eq!( - Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], - Some((2, 0.into(), 6)) - ); - }); -} - -#[test] -fn auction_proceeds_correctly() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_eq!(AuctionCounter::::get(), 1); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(2); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(3); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(4); - 
assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(5); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(6); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - - run_to_block(7); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - - run_to_block(8); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); -} - -#[test] -fn can_win_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - }); -} - -#[test] -fn can_win_auction_with_late_randomness() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - run_to_block(8); - // Auction has not yet ended. 
- assert_eq!(leases(), vec![]); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - // This will prevent the auction's winner from being decided in the next block, since - // the random seed was known before the final bids were made. - set_last_random(H256::zero(), 8); - // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet - // since no randomness available yet. - run_to_block(9); - // Auction has now ended... But auction winner still not yet decided, so no leases yet. - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(0) - ); - assert_eq!(leases(), vec![]); - - // Random seed now updated to a value known at block 9, when the auction ended. This - // means that the winner can now be chosen. - set_last_random(H256::zero(), 9); - run_to_block(10); - // Auction ended and winner selected - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - }); -} - -#[test] -fn can_win_incomplete_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5)); - run_to_block(9); - - assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - }); -} - -#[test] -fn should_choose_best_combination() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - 
assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2)); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 2, amount: 4 }), - ((0.into(), 3), LeaseData { leaser: 2, amount: 4 }), - ((0.into(), 4), LeaseData { leaser: 3, amount: 2 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - assert_eq!(TestLeaser::deposit_held(1.into(), &1), 0); - assert_eq!(TestLeaser::deposit_held(0.into(), &2), 4); - assert_eq!(TestLeaser::deposit_held(0.into(), &3), 2); - }); -} - -#[test] -fn gap_bid_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - // User 1 will make a bid for period 1 and 4 for the same Para 0 - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 4)); - - // User 2 and 3 will make a bid for para 1 on period 2 and 3 respectively - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 1.into(), 1, 2, 2, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 1.into(), 1, 3, 3, 3)); - - // Total reserved should be the max of the two - assert_eq!(Balances::reserved_balance(1), 4); - - // Other people are reserved correctly too - assert_eq!(Balances::reserved_balance(2), 2); - assert_eq!(Balances::reserved_balance(3), 3); - - // End the auction. 
- run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 4 }), - ((1.into(), 2), LeaseData { leaser: 2, amount: 2 }), - ((1.into(), 3), LeaseData { leaser: 3, amount: 3 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 4); - assert_eq!(TestLeaser::deposit_held(1.into(), &2), 2); - assert_eq!(TestLeaser::deposit_held(1.into(), &3), 3); - }); -} - -#[test] -fn deposit_credit_should_work() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); - assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); - - assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6)); - // Only 1 reserved since we have a deposit credit of 5. 
- assert_eq!(Balances::reserved_balance(1), 1); - run_to_block(20); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 6 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 6); - }); -} - -#[test] -fn deposit_credit_on_alt_para_should_not_count() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); - assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); - - assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6)); - // 6 reserved since we are bidding on a new para; only works because we don't - assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(20); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), - ((1.into(), 2), LeaseData { leaser: 1, amount: 6 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - assert_eq!(TestLeaser::deposit_held(1.into(), &1), 6); - }); -} - -#[test] -fn multiple_bids_work_pre_ending() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - for i in 1..6u64 { - run_to_block(i as _); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); - for j in 1..6 { - assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 }); - assert_eq!(Balances::free_balance(j), if j == i { j * 9 } else { j * 10 }); - } - } - - run_to_block(9); - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 5, amount: 5 }), - ((0.into(), 2), LeaseData { leaser: 5, amount: 5 }), - ((0.into(), 3), 
LeaseData { leaser: 5, amount: 5 }), - ((0.into(), 4), LeaseData { leaser: 5, amount: 5 }), - ] - ); - }); -} - -#[test] -fn multiple_bids_work_post_ending() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1)); - - for i in 1..6u64 { - run_to_block(((i - 1) / 2 + 1) as _); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); - for j in 1..6 { - assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 }); - assert_eq!(Balances::free_balance(j), if j <= i { j * 9 } else { j * 10 }); - } - } - for i in 1..6u64 { - assert_eq!(ReservedAmounts::::get((i, ParaId::from(0))).unwrap(), i); - } - - run_to_block(5); - assert_eq!( - leases(), - (1..=4) - .map(|i| ((0.into(), i), LeaseData { leaser: 2, amount: 2 })) - .collect::>() - ); - }); -} - -#[test] -fn incomplete_calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ThreeThree as u8 as usize] = Some((1, 0.into(), 1)); - - let winners = vec![(1, 0.into(), 1, SlotRange::ThreeThree)]; - - assert_eq!(Auctions::calculate_winners(winning), winners); -} - -#[test] -fn first_incomplete_calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[0] = Some((1, 0.into(), 1)); - - let winners = vec![(1, 0.into(), 1, SlotRange::ZeroZero)]; - - assert_eq!(Auctions::calculate_winners(winning), winners); -} - -#[test] -fn calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroZero as u8 as usize] = Some((2, 0.into(), 2)); - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 1)); - winning[SlotRange::OneOne as u8 as usize] = Some((3, 1.into(), 1)); - winning[SlotRange::TwoTwo as u8 as usize] = Some((1, 2.into(), 53)); - winning[SlotRange::ThreeThree as u8 as usize] = Some((5, 3.into(), 1)); - - let winners = vec![ - (2, 0.into(), 2, SlotRange::ZeroZero), - (3, 1.into(), 1, 
SlotRange::OneOne), - (1, 2.into(), 53, SlotRange::TwoTwo), - (5, 3.into(), 1, SlotRange::ThreeThree), - ]; - assert_eq!(Auctions::calculate_winners(winning), winners); - - winning[SlotRange::ZeroOne as u8 as usize] = Some((4, 10.into(), 3)); - let winners = vec![ - (4, 10.into(), 3, SlotRange::ZeroOne), - (1, 2.into(), 53, SlotRange::TwoTwo), - (5, 3.into(), 1, SlotRange::ThreeThree), - ]; - assert_eq!(Auctions::calculate_winners(winning), winners); - - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 100)); - let winners = vec![(1, 100.into(), 100, SlotRange::ZeroThree)]; - assert_eq!(Auctions::calculate_winners(winning), winners); -} - -#[test] -fn lower_bids_are_correctly_refunded() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - - // Make a bid and reserve a balance - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); - assert_eq!(Balances::reserved_balance(1), 9); - assert_eq!(ReservedAmounts::::get((1, para_1)), Some(9)); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(ReservedAmounts::::get((2, para_2)), None); - - // Bigger bid, reserves new balance and returns funds - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 1, 4, 19)); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(ReservedAmounts::::get((1, para_1)), None); - assert_eq!(Balances::reserved_balance(2), 19); - assert_eq!(ReservedAmounts::::get((2, para_2)), Some(19)); - }); -} - -#[test] -fn initialize_winners_in_ending_period_works() { - new_test_ext().execute_with(|| { - let ed: u64 = ::ExistentialDeposit::get(); - assert_eq!(ed, 1); - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - let para_3 = ParaId::from(3_u32); - - // Make bids - 
assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 3, 4, 19)); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); - winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(11); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - assert_eq!(Winning::::get(1), Some(winning)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29)); - - run_to_block(12); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(2), Some(winning)); - }); -} - -#[test] -fn handle_bid_requires_registered_para() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1), - Error::::ParaNotRegistered - ); - assert_ok!(TestRegistrar::::register( - 1, - 1337.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1)); - }); -} - -#[test] -fn handle_bid_checks_existing_lease_periods() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 
1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - - // Para 1 just won an auction above and won some lease periods. - // No bids can work which overlap these periods. - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 4, 1), - Error::::AlreadyLeasedOut, - ); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 2, 1), - Error::::AlreadyLeasedOut, - ); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 3, 4, 1), - Error::::AlreadyLeasedOut, - ); - // This is okay, not an overlapping bid. - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 1, 1)); - }); -} - -// Here we will test that taking only 10 samples during the ending period works as expected. 
-#[test] -fn less_winning_samples_work() { - new_test_ext().execute_with(|| { - let ed: u64 = ::ExistentialDeposit::get(); - assert_eq!(ed, 1); - EndingPeriod::set(30); - SampleLength::set(10); - - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - let para_3 = ParaId::from(3_u32); - - // Make bids - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 11, 14, 9)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 13, 14, 19)); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); - winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - assert_eq!(Winning::::get(0), Some(winning)); - - // New bids update the current winning - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 14, 14, 29)); - winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(20); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - assert_eq!(Winning::::get(1), Some(winning)); - run_to_block(25); - // Overbid mid sample - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29)); - winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(1), Some(winning)); - - run_to_block(30); - assert_eq!( - Auctions::auction_status(System::block_number()), - 
AuctionStatus::::EndingPeriod(2, 0) - ); - assert_eq!(Winning::::get(2), Some(winning)); - - set_last_random(H256::from([254; 32]), 40); - run_to_block(40); - // Auction ended and winner selected - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - assert_eq!( - leases(), - vec![ - ((3.into(), 13), LeaseData { leaser: 3, amount: 29 }), - ((3.into(), 14), LeaseData { leaser: 3, amount: 29 }), - ] - ); - }); -} - -#[test] -fn auction_status_works() { - new_test_ext().execute_with(|| { - EndingPeriod::set(30); - SampleLength::set(10); - set_last_random(dummy_hash(), 0); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - - run_to_block(11); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 1) - ); - - run_to_block(19); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 9) - ); - - run_to_block(20); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - - run_to_block(25); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 5) - ); - - run_to_block(30); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - - run_to_block(39); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 9) - ); - - run_to_block(40); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(0) - ); - - 
run_to_block(44); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(4) - ); - - set_last_random(dummy_hash(), 45); - run_to_block(45); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); -} - -#[test] -fn can_cancel_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - - assert_noop!(Auctions::cancel_auction(RuntimeOrigin::signed(6)), BadOrigin); - assert_ok!(Auctions::cancel_auction(RuntimeOrigin::root())); - - assert!(AuctionInfo::::get().is_none()); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(ReservedAmounts::::iter().count(), 0); - assert_eq!(Winning::::iter().count(), 0); - }); -} diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs new file mode 100644 index 000000000000..b77cbfeff77c --- /dev/null +++ b/polkadot/runtime/common/src/claims.rs @@ -0,0 +1,1755 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Pallet to process claims from Ethereum addresses. 
+ +#[cfg(not(feature = "std"))] +use alloc::{format, string::String}; +use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::fmt::Debug; +use frame_support::{ + ensure, + traits::{Currency, Get, IsSubType, VestingSchedule}, + weights::Weight, + DefaultNoBound, +}; +pub use pallet::*; +use polkadot_primitives::ValidityError; +use scale_info::TypeInfo; +use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; +use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, CheckedSub, DispatchInfoOf, + Dispatchable, TransactionExtension, Zero, + }, + transaction_validity::{ + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + RuntimeDebug, +}; + +type CurrencyOf = <::VestingSchedule as VestingSchedule< + ::AccountId, +>>::Currency; +type BalanceOf = as Currency<::AccountId>>::Balance; + +pub trait WeightInfo { + fn claim() -> Weight; + fn mint_claim() -> Weight; + fn claim_attest() -> Weight; + fn attest() -> Weight; + fn move_claim() -> Weight; + fn prevalidate_attests() -> Weight; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn claim() -> Weight { + Weight::zero() + } + fn mint_claim() -> Weight { + Weight::zero() + } + fn claim_attest() -> Weight { + Weight::zero() + } + fn attest() -> Weight { + Weight::zero() + } + fn move_claim() -> Weight { + Weight::zero() + } + fn prevalidate_attests() -> Weight { + Weight::zero() + } +} + +/// The kind of statement an account needs to make for a claim to be valid. +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + RuntimeDebug, + TypeInfo, + Serialize, + Deserialize, + MaxEncodedLen, +)] +pub enum StatementKind { + /// Statement required to be made by non-SAFT holders. + Regular, + /// Statement required to be made by SAFT holders. 
+ Saft, +} + +impl StatementKind { + /// Convert this to the (English) statement it represents. + fn to_text(self) -> &'static [u8] { + match self { + StatementKind::Regular => + &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ + Qmc1XYqT6S39WNp2UeiRUrZichUWUPpGEThDE6dAb3f6Ny. (This may be found at the URL: \ + https://statement.polkadot.network/regular.html)"[..], + StatementKind::Saft => + &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ + QmXEkMahfhHJPzT3RjkXiZVFi77ZeVeuxtAjhojGRNYckz. (This may be found at the URL: \ + https://statement.polkadot.network/saft.html)"[..], + } + } +} + +impl Default for StatementKind { + fn default() -> Self { + StatementKind::Regular + } +} + +/// An Ethereum address (i.e. 20 bytes, used to represent an Ethereum account). +/// +/// This gets serialized to the 0x-prefixed hex representation. +#[derive( + Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, +)] +pub struct EthereumAddress([u8; 20]); + +impl Serialize for EthereumAddress { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let hex: String = rustc_hex::ToHex::to_hex(&self.0[..]); + serializer.serialize_str(&format!("0x{}", hex)) + } +} + +impl<'de> Deserialize<'de> for EthereumAddress { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let base_string = String::deserialize(deserializer)?; + let offset = if base_string.starts_with("0x") { 2 } else { 0 }; + let s = &base_string[offset..]; + if s.len() != 40 { + Err(serde::de::Error::custom( + "Bad length of Ethereum address (should be 42 including '0x')", + ))?; + } + let raw: Vec = rustc_hex::FromHex::from_hex(s) + .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + let mut r = Self::default(); + r.0.copy_from_slice(&raw); + Ok(r) + } +} + +#[derive(Encode, Decode, Clone, TypeInfo, MaxEncodedLen)] +pub struct EcdsaSignature(pub [u8; 65]); + +impl PartialEq 
for EcdsaSignature { + fn eq(&self, other: &Self) -> bool { + &self.0[..] == &other.0[..] + } +} + +impl core::fmt::Debug for EcdsaSignature { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "EcdsaSignature({:?})", &self.0[..]) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type VestingSchedule: VestingSchedule>; + #[pallet::constant] + type Prefix: Get<&'static [u8]>; + type MoveClaimOrigin: EnsureOrigin; + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Someone claimed some DOTs. + Claimed { who: T::AccountId, ethereum_address: EthereumAddress, amount: BalanceOf }, + } + + #[pallet::error] + pub enum Error { + /// Invalid Ethereum signature. + InvalidEthereumSignature, + /// Ethereum address has no claim. + SignerHasNoClaim, + /// Account ID sending transaction has no claim. + SenderHasNoClaim, + /// There's not enough in the pot to pay out some unvested amount. Generally implies a + /// logic error. + PotUnderflow, + /// A needed statement was not included. + InvalidStatement, + /// The account already has a vested balance. + VestedBalanceExists, + } + + #[pallet::storage] + pub type Claims = StorageMap<_, Identity, EthereumAddress, BalanceOf>; + + #[pallet::storage] + pub type Total = StorageValue<_, BalanceOf, ValueQuery>; + + /// Vesting schedule for a claim. + /// First balance is the total amount that should be held for vesting. + /// Second balance is how much should be unlocked per block. + /// The block number is when the vesting should start. 
+ #[pallet::storage] + pub type Vesting = + StorageMap<_, Identity, EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor)>; + + /// The statement kind that must be signed, if any. + #[pallet::storage] + pub(super) type Signing = StorageMap<_, Identity, EthereumAddress, StatementKind>; + + /// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to. + #[pallet::storage] + pub(super) type Preclaims = StorageMap<_, Identity, T::AccountId, EthereumAddress>; + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + pub claims: + Vec<(EthereumAddress, BalanceOf, Option, Option)>, + pub vesting: Vec<(EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor))>, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + // build `Claims` + self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { + Claims::::insert(a, b); + }); + // build `Total` + Total::::put( + self.claims + .iter() + .fold(Zero::zero(), |acc: BalanceOf, &(_, b, _, _)| acc + b), + ); + // build `Vesting` + self.vesting.iter().for_each(|(k, v)| { + Vesting::::insert(k, v); + }); + // build `Signing` + self.claims + .iter() + .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) + .for_each(|(a, s)| { + Signing::::insert(a, s); + }); + // build `Preclaims` + self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( + |(i, a)| { + Preclaims::::insert(i, a); + }, + ); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Make a claim to collect your DOTs. + /// + /// The dispatch origin for this call must be _None_. + /// + /// Unsigned Validation: + /// A call to claim is deemed valid if the signature provided matches + /// the expected signed message of: + /// + /// > Ethereum Signed Message: + /// > (configured prefix string)(address) + /// + /// and `address` matches the `dest` account. 
+ /// + /// Parameters: + /// - `dest`: The destination account to payout the claim. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to validate unsigned `claim` call. + /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::claim())] + pub fn claim( + origin: OriginFor, + dest: T::AccountId, + ethereum_signature: EcdsaSignature, + ) -> DispatchResult { + ensure_none(origin)?; + + let data = dest.using_encoded(to_ascii_hex); + let signer = Self::eth_recover(ðereum_signature, &data, &[][..]) + .ok_or(Error::::InvalidEthereumSignature)?; + ensure!(Signing::::get(&signer).is_none(), Error::::InvalidStatement); + + Self::process_claim(signer, dest)?; + Ok(()) + } + + /// Mint a new claim to collect DOTs. + /// + /// The dispatch origin for this call must be _Root_. + /// + /// Parameters: + /// - `who`: The Ethereum address allowed to collect this claim. + /// - `value`: The number of DOTs that will be claimed. + /// - `vesting_schedule`: An optional vesting schedule for these DOTs. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// We assume worst case that both vesting and statement is being inserted. 
+ /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::mint_claim())] + pub fn mint_claim( + origin: OriginFor, + who: EthereumAddress, + value: BalanceOf, + vesting_schedule: Option<(BalanceOf, BalanceOf, BlockNumberFor)>, + statement: Option, + ) -> DispatchResult { + ensure_root(origin)?; + + Total::::mutate(|t| *t += value); + Claims::::insert(who, value); + if let Some(vs) = vesting_schedule { + Vesting::::insert(who, vs); + } + if let Some(s) = statement { + Signing::::insert(who, s); + } + Ok(()) + } + + /// Make a claim to collect your DOTs by signing a statement. + /// + /// The dispatch origin for this call must be _None_. + /// + /// Unsigned Validation: + /// A call to `claim_attest` is deemed valid if the signature provided matches + /// the expected signed message of: + /// + /// > Ethereum Signed Message: + /// > (configured prefix string)(address)(statement) + /// + /// and `address` matches the `dest` account; the `statement` must match that which is + /// expected according to your purchase arrangement. + /// + /// Parameters: + /// - `dest`: The destination account to payout the claim. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to validate unsigned `claim_attest` call. 
+ /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::claim_attest())] + pub fn claim_attest( + origin: OriginFor, + dest: T::AccountId, + ethereum_signature: EcdsaSignature, + statement: Vec, + ) -> DispatchResult { + ensure_none(origin)?; + + let data = dest.using_encoded(to_ascii_hex); + let signer = Self::eth_recover(ðereum_signature, &data, &statement) + .ok_or(Error::::InvalidEthereumSignature)?; + if let Some(s) = Signing::::get(signer) { + ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); + } + Self::process_claim(signer, dest)?; + Ok(()) + } + + /// Attest to a statement, needed to finalize the claims process. + /// + /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a + /// `TransactionExtension`. + /// + /// Unsigned Validation: + /// A call to attest is deemed valid if the sender has a `Preclaim` registered + /// and provides a `statement` which is expected for the account. + /// + /// Parameters: + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to do pre-validation on `attest` call. 
+ /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(3)] + #[pallet::weight(( + T::WeightInfo::attest(), + DispatchClass::Normal, + Pays::No + ))] + pub fn attest(origin: OriginFor, statement: Vec) -> DispatchResult { + let who = ensure_signed(origin)?; + let signer = Preclaims::::get(&who).ok_or(Error::::SenderHasNoClaim)?; + if let Some(s) = Signing::::get(signer) { + ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); + } + Self::process_claim(signer, who.clone())?; + Preclaims::::remove(&who); + Ok(()) + } + + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::move_claim())] + pub fn move_claim( + origin: OriginFor, + old: EthereumAddress, + new: EthereumAddress, + maybe_preclaim: Option, + ) -> DispatchResultWithPostInfo { + T::MoveClaimOrigin::try_origin(origin).map(|_| ()).or_else(ensure_root)?; + + Claims::::take(&old).map(|c| Claims::::insert(&new, c)); + Vesting::::take(&old).map(|c| Vesting::::insert(&new, c)); + Signing::::take(&old).map(|c| Signing::::insert(&new, c)); + maybe_preclaim.map(|preclaim| { + Preclaims::::mutate(&preclaim, |maybe_o| { + if maybe_o.as_ref().map_or(false, |o| o == &old) { + *maybe_o = Some(new) + } + }) + }); + Ok(Pays::No.into()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + const PRIORITY: u64 = 100; + + let (maybe_signer, maybe_statement) = match call { + // + // The weight of this logic is included in the `claim` dispatchable. + // + Call::claim { dest: account, ethereum_signature } => { + let data = account.using_encoded(to_ascii_hex); + (Self::eth_recover(ðereum_signature, &data, &[][..]), None) + }, + // + // The weight of this logic is included in the `claim_attest` dispatchable. 
+ // + Call::claim_attest { dest: account, ethereum_signature, statement } => { + let data = account.using_encoded(to_ascii_hex); + ( + Self::eth_recover(ðereum_signature, &data, &statement), + Some(statement.as_slice()), + ) + }, + _ => return Err(InvalidTransaction::Call.into()), + }; + + let signer = maybe_signer.ok_or(InvalidTransaction::Custom( + ValidityError::InvalidEthereumSignature.into(), + ))?; + + let e = InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()); + ensure!(Claims::::contains_key(&signer), e); + + let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); + match Signing::::get(signer) { + None => ensure!(maybe_statement.is_none(), e), + Some(s) => ensure!(Some(s.to_text()) == maybe_statement, e), + } + + Ok(ValidTransaction { + priority: PRIORITY, + requires: vec![], + provides: vec![("claims", signer).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } +} + +/// Converts the given binary data into ASCII-encoded hex. It will be twice the length. +fn to_ascii_hex(data: &[u8]) -> Vec { + let mut r = Vec::with_capacity(data.len() * 2); + let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n }); + for &b in data.iter() { + push_nibble(b / 16); + push_nibble(b % 16); + } + r +} + +impl Pallet { + // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. 
+ fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec { + let prefix = T::Prefix::get(); + let mut l = prefix.len() + what.len() + extra.len(); + let mut rev = Vec::new(); + while l > 0 { + rev.push(b'0' + (l % 10) as u8); + l /= 10; + } + let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); + v.extend(rev.into_iter().rev()); + v.extend_from_slice(prefix); + v.extend_from_slice(what); + v.extend_from_slice(extra); + v + } + + // Attempts to recover the Ethereum address from a message signature signed by using + // the Ethereum RPC's `personal_sign` and `eth_sign`. + fn eth_recover(s: &EcdsaSignature, what: &[u8], extra: &[u8]) -> Option { + let msg = keccak_256(&Self::ethereum_signable_message(what, extra)); + let mut res = EthereumAddress::default(); + res.0 + .copy_from_slice(&keccak_256(&secp256k1_ecdsa_recover(&s.0, &msg).ok()?[..])[12..]); + Some(res) + } + + fn process_claim(signer: EthereumAddress, dest: T::AccountId) -> sp_runtime::DispatchResult { + let balance_due = Claims::::get(&signer).ok_or(Error::::SignerHasNoClaim)?; + + let new_total = + Total::::get().checked_sub(&balance_due).ok_or(Error::::PotUnderflow)?; + + let vesting = Vesting::::get(&signer); + if vesting.is_some() && T::VestingSchedule::vesting_balance(&dest).is_some() { + return Err(Error::::VestedBalanceExists.into()) + } + + // We first need to deposit the balance to ensure that the account exists. + let _ = CurrencyOf::::deposit_creating(&dest, balance_due); + + // Check if this claim should have a vesting schedule. + if let Some(vs) = vesting { + // This can only fail if the account already has a vesting schedule, + // but this is checked above. 
+ T::VestingSchedule::add_vesting_schedule(&dest, vs.0, vs.1, vs.2) + .expect("No other vesting schedule exists, as checked above; qed"); + } + + Total::::put(new_total); + Claims::::remove(&signer); + Vesting::::remove(&signer); + Signing::::remove(&signer); + + // Let's deposit an event to let the outside world know this happened. + Self::deposit_event(Event::::Claimed { + who: dest, + ethereum_address: signer, + amount: balance_due, + }); + + Ok(()) + } +} + +/// Validate `attest` calls prior to execution. Needed to avoid a DoS attack since they are +/// otherwise free to place on chain. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct PrevalidateAttests(core::marker::PhantomData); + +impl Debug for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "PrevalidateAttests") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { + Ok(()) + } +} + +impl PrevalidateAttests +where + ::RuntimeCall: IsSubType>, +{ + /// Create new `TransactionExtension` to check runtime version. + pub fn new() -> Self { + Self(core::marker::PhantomData) + } +} + +impl TransactionExtension for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, +{ + const IDENTIFIER: &'static str = "PrevalidateAttests"; + type Implicit = (); + type Pre = (); + type Val = (); + + fn weight(&self, call: &T::RuntimeCall) -> Weight { + if let Some(Call::attest { .. 
}) = call.is_sub_type() { + T::WeightInfo::prevalidate_attests() + } else { + Weight::zero() + } + } + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + if let Some(Call::attest { statement: attested_statement }) = call.is_sub_type() { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let signer = Preclaims::::get(who) + .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; + if let Some(s) = Signing::::get(signer) { + let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); + ensure!(&attested_statement[..] == s.to_text(), e); + } + } + Ok((ValidTransaction::default(), (), origin)) + } + + impl_tx_ext_default!(T::RuntimeCall; prepare); +} + +#[cfg(any(test, feature = "runtime-benchmarks"))] +mod secp_utils { + use super::*; + + pub fn public(secret: &libsecp256k1::SecretKey) -> libsecp256k1::PublicKey { + libsecp256k1::PublicKey::from_secret_key(secret) + } + pub fn eth(secret: &libsecp256k1::SecretKey) -> EthereumAddress { + let mut res = EthereumAddress::default(); + res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); + res + } + pub fn sig( + secret: &libsecp256k1::SecretKey, + what: &[u8], + extra: &[u8], + ) -> EcdsaSignature { + let msg = keccak_256(&super::Pallet::::ethereum_signable_message( + &to_ascii_hex(what)[..], + extra, + )); + let (sig, recovery_id) = libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), secret); + let mut r = [0u8; 65]; + r[0..64].copy_from_slice(&sig.serialize()[..]); + r[64] = recovery_id.serialize(); + EcdsaSignature(r) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + use secp_utils::*; + + use codec::Encode; + // The testing primitives are very useful for 
avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use crate::claims; + use claims::Call as ClaimsCall; + use frame_support::{ + assert_err, assert_noop, assert_ok, derive_impl, + dispatch::{GetDispatchInfo, Pays}, + ord_parameter_types, parameter_types, + traits::{ExistenceRequirement, WithdrawReasons}, + }; + use pallet_balances; + use sp_runtime::{ + traits::{DispatchTransaction, Identity}, + transaction_validity::TransactionLongevity, + BuildStorage, + DispatchError::BadOrigin, + TokenError, + }; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Vesting: pallet_vesting, + Claims: claims, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type AccountData = pallet_balances::AccountData; + type MaxConsumers = frame_support::traits::ConstU32<16>; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Test { + type AccountStore = System; + } + + parameter_types! { + pub const MinVestedTransfer: u64 = 1; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); + } + + impl pallet_vesting::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; + const MAX_VESTING_SCHEDULES: u32 = 28; + } + + parameter_types! 
{ + pub Prefix: &'static [u8] = b"Pay RUSTs to the TEST account:"; + } + ord_parameter_types! { + pub const Six: u64 = 6; + } + + impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type VestingSchedule = Vesting; + type Prefix = Prefix; + type MoveClaimOrigin = frame_system::EnsureSignedBy; + type WeightInfo = TestWeightInfo; + } + + fn alice() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap() + } + fn bob() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap() + } + fn dave() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap() + } + fn eve() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap() + } + fn frank() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap() + } + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + // We use default for brevity, but you can configure as desired if needed. 
+ pallet_balances::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); + claims::GenesisConfig:: { + claims: vec![ + (eth(&alice()), 100, None, None), + (eth(&dave()), 200, None, Some(StatementKind::Regular)), + (eth(&eve()), 300, Some(42), Some(StatementKind::Saft)), + (eth(&frank()), 400, Some(43), None), + ], + vesting: vec![(eth(&alice()), (50, 10, 1))], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + fn total_claims() -> u64 { + 100 + 200 + 300 + 400 + } + + #[test] + fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(claims::Total::::get(), total_claims()); + assert_eq!(claims::Claims::::get(ð(&alice())), Some(100)); + assert_eq!(claims::Claims::::get(ð(&dave())), Some(200)); + assert_eq!(claims::Claims::::get(ð(&eve())), Some(300)); + assert_eq!(claims::Claims::::get(ð(&frank())), Some(400)); + assert_eq!(claims::Claims::::get(&EthereumAddress::default()), None); + assert_eq!(claims::Vesting::::get(ð(&alice())), Some((50, 10, 1))); + }); + } + + #[test] + fn serde_works() { + let x = EthereumAddress(hex!["0123456789abcdef0123456789abcdef01234567"]); + let y = serde_json::to_string(&x).unwrap(); + assert_eq!(y, "\"0x0123456789abcdef0123456789abcdef01234567\""); + let z: EthereumAddress = serde_json::from_str(&y).unwrap(); + assert_eq!(x, z); + } + + #[test] + fn claiming_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + assert_eq!(Vesting::vesting_balance(&42), Some(50)); + assert_eq!(claims::Total::::get(), total_claims() - 100); + }); + } + + #[test] + fn basic_claim_moving_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::move_claim(RuntimeOrigin::signed(1), eth(&alice()), eth(&bob()), None), + BadOrigin + ); + 
assert_ok!(Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&alice()), + eth(&bob()), + None + )); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&bob(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + assert_eq!(Vesting::vesting_balance(&42), Some(50)); + assert_eq!(claims::Total::::get(), total_claims() - 100); + }); + } + + #[test] + fn claim_attest_moving_works() { + new_test_ext().execute_with(|| { + assert_ok!(Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&dave()), + eth(&bob()), + None + )); + let s = sig::(&bob(), &42u64.encode(), StatementKind::Regular.to_text()); + assert_ok!(Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 200); + }); + } + + #[test] + fn attest_moving_works() { + new_test_ext().execute_with(|| { + assert_ok!(Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&eve()), + eth(&bob()), + Some(42) + )); + assert_ok!(Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 300); + }); + } + + #[test] + fn claiming_does_not_bypass_signing() { + new_test_ext().execute_with(|| { + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&dave(), &42u64.encode(), &[][..]) + ), + Error::::InvalidStatement, + ); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&eve(), &42u64.encode(), &[][..]) + ), + Error::::InvalidStatement, + ); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&frank(), &42u64.encode(), &[][..]) + )); + }); + } + + #[test] + fn attest_claiming_works() { + 
new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + let s = sig::(&dave(), &42u64.encode(), StatementKind::Saft.to_text()); + let r = Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s.clone(), + StatementKind::Saft.to_text().to_vec(), + ); + assert_noop!(r, Error::::InvalidStatement); + + let r = Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec(), + ); + assert_noop!(r, Error::::SignerHasNoClaim); + // ^^^ we use ecdsa_recover, so an invalid signature just results in a random signer id + // being recovered, which realistically will never have a claim. + + let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); + assert_ok!(Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 200); + assert_eq!(claims::Total::::get(), total_claims() - 200); + + let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); + let r = Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec(), + ); + assert_noop!(r, Error::::SignerHasNoClaim); + }); + } + + #[test] + fn attesting_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::attest(RuntimeOrigin::signed(69), StatementKind::Saft.to_text().to_vec()), + Error::::SenderHasNoClaim + ); + assert_noop!( + Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Regular.to_text().to_vec() + ), + Error::::InvalidStatement + ); + assert_ok!(Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 300); + assert_eq!(claims::Total::::get(), total_claims() - 300); + }); + } + + #[test] + fn claim_cannot_clobber_preclaim() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + // Alice's claim is 100 + 
assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + // Eve's claim is 300 through Account 42 + assert_ok!(Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 100 + 300); + assert_eq!(claims::Total::::get(), total_claims() - 400); + }); + } + + #[test] + fn valid_attest_transactions_are_free() { + new_test_ext().execute_with(|| { + let p = PrevalidateAttests::::new(); + let c = RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Saft.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + assert_eq!(di.pays_fee, Pays::No); + let r = p.validate_only(Some(42).into(), &c, &di, 20); + assert_eq!(r.unwrap().0, ValidTransaction::default()); + }); + } + + #[test] + fn invalid_attest_transactions_are_recognized() { + new_test_ext().execute_with(|| { + let p = PrevalidateAttests::::new(); + let c = RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Regular.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + let r = p.validate_only(Some(42).into(), &c, &di, 20); + assert!(r.is_err()); + let c = RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Saft.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + let r = p.validate_only(Some(69).into(), &c, &di, 20); + assert!(r.is_err()); + }); + } + + #[test] + fn cannot_bypass_attest_claiming() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + let s = sig::(&dave(), &42u64.encode(), &[]); + let r = Claims::claim(RuntimeOrigin::none(), 42, s.clone()); + assert_noop!(r, Error::::InvalidStatement); + }); + } + + #[test] + fn add_claim_works() { + new_test_ext().execute_with(|| { + assert_noop!( + Claims::mint_claim(RuntimeOrigin::signed(42), eth(&bob()), 200, None, None), + sp_runtime::traits::BadOrigin, + ); + 
assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim, + ); + assert_ok!(Claims::mint_claim(RuntimeOrigin::root(), eth(&bob()), 200, None, None)); + assert_eq!(claims::Total::::get(), total_claims() + 200); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&69), 200); + assert_eq!(Vesting::vesting_balance(&69), None); + assert_eq!(claims::Total::::get(), total_claims()); + }); + } + + #[test] + fn add_claim_with_vesting_works() { + new_test_ext().execute_with(|| { + assert_noop!( + Claims::mint_claim( + RuntimeOrigin::signed(42), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + ), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim, + ); + assert_ok!(Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + )); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&69), 200); + assert_eq!(Vesting::vesting_balance(&69), Some(50)); + + // Make sure we can not transfer the vested balance. 
+ assert_err!( + >::transfer( + &69, + &80, + 180, + ExistenceRequirement::AllowDeath + ), + TokenError::Frozen, + ); + }); + } + + #[test] + fn add_claim_with_statement_works() { + new_test_ext().execute_with(|| { + assert_noop!( + Claims::mint_claim( + RuntimeOrigin::signed(42), + eth(&bob()), + 200, + None, + Some(StatementKind::Regular) + ), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(Balances::free_balance(42), 0); + let signature = sig::(&bob(), &69u64.encode(), StatementKind::Regular.to_text()); + assert_noop!( + Claims::claim_attest( + RuntimeOrigin::none(), + 69, + signature.clone(), + StatementKind::Regular.to_text().to_vec() + ), + Error::::SignerHasNoClaim + ); + assert_ok!(Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + None, + Some(StatementKind::Regular) + )); + assert_noop!( + Claims::claim_attest(RuntimeOrigin::none(), 69, signature.clone(), vec![],), + Error::::SignerHasNoClaim + ); + assert_ok!(Claims::claim_attest( + RuntimeOrigin::none(), + 69, + signature.clone(), + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&69), 200); + }); + } + + #[test] + fn origin_signed_claiming_fail() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_err!( + Claims::claim( + RuntimeOrigin::signed(42), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + sp_runtime::traits::BadOrigin, + ); + }); + } + + #[test] + fn double_claiming_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); + } + + #[test] + fn claiming_while_vested_doesnt_work() { + new_test_ext().execute_with(|| { + CurrencyOf::::make_free_balance_be(&69, total_claims()); + 
assert_eq!(Balances::free_balance(69), total_claims()); + // A user is already vested + assert_ok!(::VestingSchedule::add_vesting_schedule( + &69, + total_claims(), + 100, + 10 + )); + assert_ok!(Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + )); + // New total + assert_eq!(claims::Total::::get(), total_claims() + 200); + + // They should not be able to claim + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::VestedBalanceExists, + ); + }); + } + + #[test] + fn non_sender_sig_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); + } + + #[test] + fn non_claimant_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); + } + + #[test] + fn real_eth_sig_works() { + new_test_ext().execute_with(|| { + // "Pay RUSTs to the TEST account:2a00000000000000" + let sig = hex!["444023e89b67e67c0562ed0305d252a5dd12b2af5ac51d6d3cb69a0b486bc4b3191401802dc29d26d586221f7256cd3329fe82174bdf659baea149a40e1c495d1c"]; + let sig = EcdsaSignature(sig); + let who = 42u64.using_encoded(to_ascii_hex); + let signer = Claims::eth_recover(&sig, &who, &[][..]).unwrap(); + assert_eq!(signer.0, hex!["6d31165d5d932d571f3b44695653b46dcc327e84"]); + }); + } + + #[test] + fn validate_unsigned_works() { + use sp_runtime::traits::ValidateUnsigned; + let source = sp_runtime::transaction_validity::TransactionSource::External; + + new_test_ext().execute_with(|| { + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { + dest: 1, + ethereum_signature: sig::(&alice(), &1u64.encode(), 
&[][..]) + } + ), + Ok(ValidTransaction { + priority: 100, + requires: vec![], + provides: vec![("claims", eth(&alice())).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { dest: 0, ethereum_signature: EcdsaSignature([0; 65]) } + ), + InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { + dest: 1, + ethereum_signature: sig::(&bob(), &1u64.encode(), &[][..]) + } + ), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + let s = sig::(&dave(), &1u64.encode(), StatementKind::Regular.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + Ok(ValidTransaction { + priority: 100, + requires: vec![], + provides: vec![("claims", eth(&dave())).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: EcdsaSignature([0; 65]), + statement: StatementKind::Regular.to_text().to_vec() + } + ), + InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), + ); + + let s = sig::(&bob(), &1u64.encode(), StatementKind::Regular.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + + let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: 
StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + + let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Saft.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::InvalidStatement.into()).into(), + ); + }); + } +} + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking { + use super::*; + use crate::claims::Call; + use frame_benchmarking::v2::*; + use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + traits::UnfilteredDispatchable, + }; + use frame_system::RawOrigin; + use secp_utils::*; + use sp_runtime::{ + traits::{DispatchTransaction, ValidateUnsigned}, + DispatchResult, + }; + + const SEED: u32 = 0; + + const MAX_CLAIMS: u32 = 10_000; + const VALUE: u32 = 1_000_000; + + fn create_claim(input: u32) -> DispatchResult { + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let eth_address = eth(&secret_key); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + None, + )?; + Ok(()) + } + + fn create_claim_attest(input: u32) -> DispatchResult { + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let eth_address = eth(&secret_key); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(Default::default()), + )?; + Ok(()) + } + + #[benchmarks( + where + ::RuntimeCall: IsSubType> + From>, + ::RuntimeCall: Dispatchable + GetDispatchInfo, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: 
AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + )] + mod benchmarks { + use super::*; + + // Benchmark `claim` including `validate_unsigned` logic. + #[benchmark] + fn claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let signature = sig::(&secret_key, &account.encode(), &[][..]); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + None, + )?; + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + let source = sp_runtime::transaction_validity::TransactionSource::External; + let call_enc = + Call::::claim { dest: account.clone(), ethereum_signature: signature.clone() } + .encode(); + + #[block] + { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); + super::Pallet::::validate_unsigned(source, &call) + .map_err(|e| -> &'static str { e.into() })?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; + } + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + // Benchmark `mint_claim` when there already exists `c` claims in storage. 
+ #[benchmark] + fn mint_claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let eth_address = account("eth_address", 0, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + + #[extrinsic_call] + _(RawOrigin::Root, eth_address, VALUE.into(), vesting, Some(statement)); + + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + Ok(()) + } + + // Benchmark `claim_attest` including `validate_unsigned` logic. + #[benchmark] + fn claim_attest() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + // Crate signature + let attest_c = u32::MAX - c; + let secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + let signature = sig::(&secret_key, &account.encode(), statement.to_text()); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + let call_enc = Call::::claim_attest { + dest: account.clone(), + ethereum_signature: signature.clone(), + statement: StatementKind::Regular.to_text().to_vec(), + } + .encode(); + let source = sp_runtime::transaction_validity::TransactionSource::External; + + #[block] + { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); + super::Pallet::::validate_unsigned(source, &call) + .map_err(|e| -> &'static str { e.into() })?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; + } + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + 
// Benchmark `attest` including prevalidate logic. + #[benchmark] + fn attest() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let attest_c = u32::MAX - c; + let secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + + let stmt = StatementKind::Regular.to_text().to_vec(); + + #[extrinsic_call] + _(RawOrigin::Signed(account), stmt); + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + #[benchmark] + fn move_claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let attest_c = u32::MAX - c; + let secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + + let new_secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&(u32::MAX / 2).encode())).unwrap(); + let new_eth_address = eth(&new_secret_key); + + let account: T::AccountId = account("user", c, SEED); + Preclaims::::insert(&account, eth_address); + + assert!(Claims::::contains_key(eth_address)); + assert!(!Claims::::contains_key(new_eth_address)); + + #[extrinsic_call] + _(RawOrigin::Root, eth_address, new_eth_address, Some(account)); + + assert!(!Claims::::contains_key(eth_address)); + assert!(Claims::::contains_key(new_eth_address)); + Ok(()) + } + + // Benchmark the time it takes to do `repeat` number of keccak256 hashes + #[benchmark(extra)] + fn keccak256(i: 
Linear<0, 10_000>) { + let bytes = (i).encode(); + + #[block] + { + for _ in 0..i { + let _hash = keccak_256(&bytes); + } + } + } + + // Benchmark the time it takes to do `repeat` number of `eth_recover` + #[benchmark(extra)] + fn eth_recover(i: Linear<0, 1_000>) { + // Crate signature + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); + let account: T::AccountId = account("user", i, SEED); + let signature = sig::(&secret_key, &account.encode(), &[][..]); + let data = account.using_encoded(to_ascii_hex); + let extra = StatementKind::default().to_text(); + + #[block] + { + for _ in 0..i { + assert!(super::Pallet::::eth_recover(&signature, &data, extra).is_some()); + } + } + } + + #[benchmark] + fn prevalidate_attests() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let ext = PrevalidateAttests::::new(); + let call = super::Call::attest { statement: StatementKind::Regular.to_text().to_vec() }; + let call: ::RuntimeCall = call.into(); + let info = call.get_dispatch_info(); + let attest_c = u32::MAX - c; + let secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(account).into(), &call, &info, 0, |_| { + Ok(Default::default()) + }) + .unwrap() + .is_ok()); + } + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::claims::tests::new_test_ext(), + crate::claims::tests::Test, + ); + } +} diff --git 
a/polkadot/runtime/common/src/claims/benchmarking.rs b/polkadot/runtime/common/src/claims/benchmarking.rs deleted file mode 100644 index f9150f7980e5..000000000000 --- a/polkadot/runtime/common/src/claims/benchmarking.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Benchmarking for claims pallet - -#[cfg(feature = "runtime-benchmarks")] -use super::*; -use crate::claims::Call; -use frame_benchmarking::v2::*; -use frame_support::{ - dispatch::{DispatchInfo, GetDispatchInfo}, - traits::UnfilteredDispatchable, -}; -use frame_system::RawOrigin; -use secp_utils::*; -use sp_runtime::{ - traits::{DispatchTransaction, ValidateUnsigned}, - DispatchResult, -}; - -const SEED: u32 = 0; - -const MAX_CLAIMS: u32 = 10_000; -const VALUE: u32 = 1_000_000; - -fn create_claim(input: u32) -> DispatchResult { - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); - let eth_address = eth(&secret_key); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - None, - )?; - Ok(()) -} - -fn create_claim_attest(input: u32) -> DispatchResult { - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); - let eth_address = 
eth(&secret_key); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - Some(Default::default()), - )?; - Ok(()) -} - -#[benchmarks( - where - ::RuntimeCall: IsSubType> + From>, - ::RuntimeCall: Dispatchable + GetDispatchInfo, - <::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, - <::RuntimeCall as Dispatchable>::PostInfo: Default, - )] -mod benchmarks { - use super::*; - - // Benchmark `claim` including `validate_unsigned` logic. - #[benchmark] - fn claim() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let signature = sig::(&secret_key, &account.encode(), &[][..]); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - None, - )?; - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let source = sp_runtime::transaction_validity::TransactionSource::External; - let call_enc = - Call::::claim { dest: account.clone(), ethereum_signature: signature.clone() } - .encode(); - - #[block] - { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - super::Pallet::::validate_unsigned(source, &call) - .map_err(|e| -> &'static str { e.into() })?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; - } - - assert_eq!(Claims::::get(eth_address), None); - Ok(()) - } - - // Benchmark `mint_claim` when there already exists `c` claims in storage. 
- #[benchmark] - fn mint_claim() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - let eth_address = account("eth_address", 0, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - - #[extrinsic_call] - _(RawOrigin::Root, eth_address, VALUE.into(), vesting, Some(statement)); - - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - Ok(()) - } - - // Benchmark `claim_attest` including `validate_unsigned` logic. - #[benchmark] - fn claim_attest() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - // Crate signature - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - let signature = sig::(&secret_key, &account.encode(), statement.to_text()); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - Some(statement), - )?; - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let call_enc = Call::::claim_attest { - dest: account.clone(), - ethereum_signature: signature.clone(), - statement: StatementKind::Regular.to_text().to_vec(), - } - .encode(); - let source = sp_runtime::transaction_validity::TransactionSource::External; - - #[block] - { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - super::Pallet::::validate_unsigned(source, &call) - .map_err(|e| -> &'static str { e.into() })?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; - } - - assert_eq!(Claims::::get(eth_address), None); - Ok(()) - } - - // 
Benchmark `attest` including prevalidate logic. - #[benchmark] - fn attest() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - Some(statement), - )?; - Preclaims::::insert(&account, eth_address); - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - - let stmt = StatementKind::Regular.to_text().to_vec(); - - #[extrinsic_call] - _(RawOrigin::Signed(account), stmt); - - assert_eq!(Claims::::get(eth_address), None); - Ok(()) - } - - #[benchmark] - fn move_claim() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - - let new_secret_key = - libsecp256k1::SecretKey::parse(&keccak_256(&(u32::MAX / 2).encode())).unwrap(); - let new_eth_address = eth(&new_secret_key); - - let account: T::AccountId = account("user", c, SEED); - Preclaims::::insert(&account, eth_address); - - assert!(Claims::::contains_key(eth_address)); - assert!(!Claims::::contains_key(new_eth_address)); - - #[extrinsic_call] - _(RawOrigin::Root, eth_address, new_eth_address, Some(account)); - - assert!(!Claims::::contains_key(eth_address)); - assert!(Claims::::contains_key(new_eth_address)); - Ok(()) - } - - // Benchmark the time it takes to do `repeat` number of keccak256 hashes - #[benchmark(extra)] - fn keccak256(i: Linear<0, 
10_000>) { - let bytes = (i).encode(); - - #[block] - { - for _ in 0..i { - let _hash = keccak_256(&bytes); - } - } - } - - // Benchmark the time it takes to do `repeat` number of `eth_recover` - #[benchmark(extra)] - fn eth_recover(i: Linear<0, 1_000>) { - // Crate signature - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); - let account: T::AccountId = account("user", i, SEED); - let signature = sig::(&secret_key, &account.encode(), &[][..]); - let data = account.using_encoded(to_ascii_hex); - let extra = StatementKind::default().to_text(); - - #[block] - { - for _ in 0..i { - assert!(super::Pallet::::eth_recover(&signature, &data, extra).is_some()); - } - } - } - - #[benchmark] - fn prevalidate_attests() -> Result<(), BenchmarkError> { - let c = MAX_CLAIMS; - for _ in 0..c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - let ext = PrevalidateAttests::::new(); - let call = super::Call::attest { statement: StatementKind::Regular.to_text().to_vec() }; - let call: ::RuntimeCall = call.into(); - let info = call.get_dispatch_info(); - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - Some(statement), - )?; - Preclaims::::insert(&account, eth_address); - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - - #[block] - { - assert!(ext - .test_run(RawOrigin::Signed(account).into(), &call, &info, 0, 0, |_| { - Ok(Default::default()) - }) - .unwrap() - .is_ok()); - } - - Ok(()) - } - - impl_benchmark_test_suite!( - Pallet, - crate::claims::mock::new_test_ext(), - crate::claims::mock::Test, - ); -} diff --git 
a/polkadot/runtime/common/src/claims/mock.rs b/polkadot/runtime/common/src/claims/mock.rs deleted file mode 100644 index 640df6ec6a8a..000000000000 --- a/polkadot/runtime/common/src/claims/mock.rs +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Mocking utilities for testing in claims pallet. - -#[cfg(test)] -use super::*; -use secp_utils::*; - -// The testing primitives are very useful for avoiding having to work with signatures -// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
-use crate::claims; -use frame_support::{derive_impl, ord_parameter_types, parameter_types, traits::WithdrawReasons}; -use pallet_balances; -use sp_runtime::{traits::Identity, BuildStorage}; - -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Vesting: pallet_vesting, - Claims: claims, - } -); - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type AccountData = pallet_balances::AccountData; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type AccountStore = System; -} - -parameter_types! { - pub const MinVestedTransfer: u64 = 1; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); -} - -impl pallet_vesting::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - type BlockNumberProvider = System; - const MAX_VESTING_SCHEDULES: u32 = 28; -} - -parameter_types! { - pub Prefix: &'static [u8] = b"Pay RUSTs to the TEST account:"; -} -ord_parameter_types! 
{ - pub const Six: u64 = 6; -} - -impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type VestingSchedule = Vesting; - type Prefix = Prefix; - type MoveClaimOrigin = frame_system::EnsureSignedBy; - type WeightInfo = TestWeightInfo; -} - -pub fn alice() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap() -} -pub fn bob() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap() -} -pub fn dave() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap() -} -pub fn eve() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap() -} -pub fn frank() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap() -} - -// This function basically just builds a genesis storage key/value store according to -// our desired mockup. -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig::::default() - .assimilate_storage(&mut t) - .unwrap(); - claims::GenesisConfig:: { - claims: vec![ - (eth(&alice()), 100, None, None), - (eth(&dave()), 200, None, Some(StatementKind::Regular)), - (eth(&eve()), 300, Some(42), Some(StatementKind::Saft)), - (eth(&frank()), 400, Some(43), None), - ], - vesting: vec![(eth(&alice()), (50, 10, 1))], - } - .assimilate_storage(&mut t) - .unwrap(); - t.into() -} - -pub fn total_claims() -> u64 { - 100 + 200 + 300 + 400 -} diff --git a/polkadot/runtime/common/src/claims/mod.rs b/polkadot/runtime/common/src/claims/mod.rs deleted file mode 100644 index f48e40ee1887..000000000000 --- a/polkadot/runtime/common/src/claims/mod.rs +++ /dev/null @@ -1,723 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Pallet to process claims from Ethereum addresses. - -#[cfg(not(feature = "std"))] -use alloc::{format, string::String}; -use alloc::{vec, vec::Vec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use core::fmt::Debug; -use frame_support::{ - ensure, - traits::{Currency, Get, IsSubType, VestingSchedule}, - weights::Weight, - DefaultNoBound, -}; -pub use pallet::*; -use polkadot_primitives::ValidityError; -use scale_info::TypeInfo; -use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; -use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; -use sp_runtime::{ - impl_tx_ext_default, - traits::{ - AsSystemOriginSigner, AsTransactionAuthorizedOrigin, CheckedSub, DispatchInfoOf, - Dispatchable, TransactionExtension, Zero, - }, - transaction_validity::{ - InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, - ValidTransaction, - }, - RuntimeDebug, -}; - -type CurrencyOf = <::VestingSchedule as VestingSchedule< - ::AccountId, ->>::Currency; -type BalanceOf = as Currency<::AccountId>>::Balance; - -pub trait WeightInfo { - fn claim() -> Weight; - fn mint_claim() -> Weight; - fn claim_attest() -> Weight; - fn attest() -> Weight; - fn move_claim() -> Weight; - fn prevalidate_attests() -> Weight; -} - -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn claim() -> Weight { - Weight::zero() - } - 
fn mint_claim() -> Weight { - Weight::zero() - } - fn claim_attest() -> Weight { - Weight::zero() - } - fn attest() -> Weight { - Weight::zero() - } - fn move_claim() -> Weight { - Weight::zero() - } - fn prevalidate_attests() -> Weight { - Weight::zero() - } -} - -/// The kind of statement an account needs to make for a claim to be valid. -#[derive( - Encode, - Decode, - Clone, - Copy, - Eq, - PartialEq, - RuntimeDebug, - TypeInfo, - Serialize, - Deserialize, - MaxEncodedLen, -)] -pub enum StatementKind { - /// Statement required to be made by non-SAFT holders. - Regular, - /// Statement required to be made by SAFT holders. - Saft, -} - -impl StatementKind { - /// Convert this to the (English) statement it represents. - fn to_text(self) -> &'static [u8] { - match self { - StatementKind::Regular => - &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ - Qmc1XYqT6S39WNp2UeiRUrZichUWUPpGEThDE6dAb3f6Ny. (This may be found at the URL: \ - https://statement.polkadot.network/regular.html)"[..], - StatementKind::Saft => - &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ - QmXEkMahfhHJPzT3RjkXiZVFi77ZeVeuxtAjhojGRNYckz. (This may be found at the URL: \ - https://statement.polkadot.network/saft.html)"[..], - } - } -} - -impl Default for StatementKind { - fn default() -> Self { - StatementKind::Regular - } -} - -/// An Ethereum address (i.e. 20 bytes, used to represent an Ethereum account). -/// -/// This gets serialized to the 0x-prefixed hex representation. 
-#[derive( - Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, -)] -pub struct EthereumAddress([u8; 20]); - -impl Serialize for EthereumAddress { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let hex: String = rustc_hex::ToHex::to_hex(&self.0[..]); - serializer.serialize_str(&format!("0x{}", hex)) - } -} - -impl<'de> Deserialize<'de> for EthereumAddress { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let base_string = String::deserialize(deserializer)?; - let offset = if base_string.starts_with("0x") { 2 } else { 0 }; - let s = &base_string[offset..]; - if s.len() != 40 { - Err(serde::de::Error::custom( - "Bad length of Ethereum address (should be 42 including '0x')", - ))?; - } - let raw: Vec = rustc_hex::FromHex::from_hex(s) - .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; - let mut r = Self::default(); - r.0.copy_from_slice(&raw); - Ok(r) - } -} - -#[derive(Encode, Decode, Clone, TypeInfo, MaxEncodedLen)] -pub struct EcdsaSignature(pub [u8; 65]); - -impl PartialEq for EcdsaSignature { - fn eq(&self, other: &Self) -> bool { - &self.0[..] == &other.0[..] - } -} - -impl core::fmt::Debug for EcdsaSignature { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "EcdsaSignature({:?})", &self.0[..]) - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - /// Configuration trait. - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. 
- type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type VestingSchedule: VestingSchedule>; - #[pallet::constant] - type Prefix: Get<&'static [u8]>; - type MoveClaimOrigin: EnsureOrigin; - type WeightInfo: WeightInfo; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// Someone claimed some DOTs. - Claimed { who: T::AccountId, ethereum_address: EthereumAddress, amount: BalanceOf }, - } - - #[pallet::error] - pub enum Error { - /// Invalid Ethereum signature. - InvalidEthereumSignature, - /// Ethereum address has no claim. - SignerHasNoClaim, - /// Account ID sending transaction has no claim. - SenderHasNoClaim, - /// There's not enough in the pot to pay out some unvested amount. Generally implies a - /// logic error. - PotUnderflow, - /// A needed statement was not included. - InvalidStatement, - /// The account already has a vested balance. - VestedBalanceExists, - } - - #[pallet::storage] - pub type Claims = StorageMap<_, Identity, EthereumAddress, BalanceOf>; - - #[pallet::storage] - pub type Total = StorageValue<_, BalanceOf, ValueQuery>; - - /// Vesting schedule for a claim. - /// First balance is the total amount that should be held for vesting. - /// Second balance is how much should be unlocked per block. - /// The block number is when the vesting should start. - #[pallet::storage] - pub type Vesting = - StorageMap<_, Identity, EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor)>; - - /// The statement kind that must be signed, if any. - #[pallet::storage] - pub(super) type Signing = StorageMap<_, Identity, EthereumAddress, StatementKind>; - - /// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to. 
- #[pallet::storage] - pub(super) type Preclaims = StorageMap<_, Identity, T::AccountId, EthereumAddress>; - - #[pallet::genesis_config] - #[derive(DefaultNoBound)] - pub struct GenesisConfig { - pub claims: - Vec<(EthereumAddress, BalanceOf, Option, Option)>, - pub vesting: Vec<(EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor))>, - } - - #[pallet::genesis_build] - impl BuildGenesisConfig for GenesisConfig { - fn build(&self) { - // build `Claims` - self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { - Claims::::insert(a, b); - }); - // build `Total` - Total::::put( - self.claims - .iter() - .fold(Zero::zero(), |acc: BalanceOf, &(_, b, _, _)| acc + b), - ); - // build `Vesting` - self.vesting.iter().for_each(|(k, v)| { - Vesting::::insert(k, v); - }); - // build `Signing` - self.claims - .iter() - .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) - .for_each(|(a, s)| { - Signing::::insert(a, s); - }); - // build `Preclaims` - self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( - |(i, a)| { - Preclaims::::insert(i, a); - }, - ); - } - } - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet { - /// Make a claim to collect your DOTs. - /// - /// The dispatch origin for this call must be _None_. - /// - /// Unsigned Validation: - /// A call to claim is deemed valid if the signature provided matches - /// the expected signed message of: - /// - /// > Ethereum Signed Message: - /// > (configured prefix string)(address) - /// - /// and `address` matches the `dest` account. - /// - /// Parameters: - /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message matching the format - /// described above. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to validate unsigned `claim` call. 
- /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::claim())] - pub fn claim( - origin: OriginFor, - dest: T::AccountId, - ethereum_signature: EcdsaSignature, - ) -> DispatchResult { - ensure_none(origin)?; - - let data = dest.using_encoded(to_ascii_hex); - let signer = Self::eth_recover(ðereum_signature, &data, &[][..]) - .ok_or(Error::::InvalidEthereumSignature)?; - ensure!(Signing::::get(&signer).is_none(), Error::::InvalidStatement); - - Self::process_claim(signer, dest)?; - Ok(()) - } - - /// Mint a new claim to collect DOTs. - /// - /// The dispatch origin for this call must be _Root_. - /// - /// Parameters: - /// - `who`: The Ethereum address allowed to collect this claim. - /// - `value`: The number of DOTs that will be claimed. - /// - `vesting_schedule`: An optional vesting schedule for these DOTs. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// We assume worst case that both vesting and statement is being inserted. - /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::mint_claim())] - pub fn mint_claim( - origin: OriginFor, - who: EthereumAddress, - value: BalanceOf, - vesting_schedule: Option<(BalanceOf, BalanceOf, BlockNumberFor)>, - statement: Option, - ) -> DispatchResult { - ensure_root(origin)?; - - Total::::mutate(|t| *t += value); - Claims::::insert(who, value); - if let Some(vs) = vesting_schedule { - Vesting::::insert(who, vs); - } - if let Some(s) = statement { - Signing::::insert(who, s); - } - Ok(()) - } - - /// Make a claim to collect your DOTs by signing a statement. - /// - /// The dispatch origin for this call must be _None_. 
- /// - /// Unsigned Validation: - /// A call to `claim_attest` is deemed valid if the signature provided matches - /// the expected signed message of: - /// - /// > Ethereum Signed Message: - /// > (configured prefix string)(address)(statement) - /// - /// and `address` matches the `dest` account; the `statement` must match that which is - /// expected according to your purchase arrangement. - /// - /// Parameters: - /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message matching the format - /// described above. - /// - `statement`: The identity of the statement which is being attested to in the - /// signature. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to validate unsigned `claim_attest` call. - /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::claim_attest())] - pub fn claim_attest( - origin: OriginFor, - dest: T::AccountId, - ethereum_signature: EcdsaSignature, - statement: Vec, - ) -> DispatchResult { - ensure_none(origin)?; - - let data = dest.using_encoded(to_ascii_hex); - let signer = Self::eth_recover(ðereum_signature, &data, &statement) - .ok_or(Error::::InvalidEthereumSignature)?; - if let Some(s) = Signing::::get(signer) { - ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); - } - Self::process_claim(signer, dest)?; - Ok(()) - } - - /// Attest to a statement, needed to finalize the claims process. - /// - /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a - /// `TransactionExtension`. - /// - /// Unsigned Validation: - /// A call to attest is deemed valid if the sender has a `Preclaim` registered - /// and provides a `statement` which is expected for the account. - /// - /// Parameters: - /// - `statement`: The identity of the statement which is being attested to in the - /// signature. 
- /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to do pre-validation on `attest` call. - /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(3)] - #[pallet::weight(( - T::WeightInfo::attest(), - DispatchClass::Normal, - Pays::No - ))] - pub fn attest(origin: OriginFor, statement: Vec) -> DispatchResult { - let who = ensure_signed(origin)?; - let signer = Preclaims::::get(&who).ok_or(Error::::SenderHasNoClaim)?; - if let Some(s) = Signing::::get(signer) { - ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); - } - Self::process_claim(signer, who.clone())?; - Preclaims::::remove(&who); - Ok(()) - } - - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::move_claim())] - pub fn move_claim( - origin: OriginFor, - old: EthereumAddress, - new: EthereumAddress, - maybe_preclaim: Option, - ) -> DispatchResultWithPostInfo { - T::MoveClaimOrigin::try_origin(origin).map(|_| ()).or_else(ensure_root)?; - - Claims::::take(&old).map(|c| Claims::::insert(&new, c)); - Vesting::::take(&old).map(|c| Vesting::::insert(&new, c)); - Signing::::take(&old).map(|c| Signing::::insert(&new, c)); - maybe_preclaim.map(|preclaim| { - Preclaims::::mutate(&preclaim, |maybe_o| { - if maybe_o.as_ref().map_or(false, |o| o == &old) { - *maybe_o = Some(new) - } - }) - }); - Ok(Pays::No.into()) - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - const PRIORITY: u64 = 100; - - let (maybe_signer, maybe_statement) = match call { - // - // The weight of this logic is included in the `claim` dispatchable. - // - Call::claim { dest: account, ethereum_signature } => { - let data = account.using_encoded(to_ascii_hex); - (Self::eth_recover(ðereum_signature, &data, &[][..]), None) - }, - // - // The weight of this logic is included in the `claim_attest` dispatchable. 
- // - Call::claim_attest { dest: account, ethereum_signature, statement } => { - let data = account.using_encoded(to_ascii_hex); - ( - Self::eth_recover(ðereum_signature, &data, &statement), - Some(statement.as_slice()), - ) - }, - _ => return Err(InvalidTransaction::Call.into()), - }; - - let signer = maybe_signer.ok_or(InvalidTransaction::Custom( - ValidityError::InvalidEthereumSignature.into(), - ))?; - - let e = InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()); - ensure!(Claims::::contains_key(&signer), e); - - let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); - match Signing::::get(signer) { - None => ensure!(maybe_statement.is_none(), e), - Some(s) => ensure!(Some(s.to_text()) == maybe_statement, e), - } - - Ok(ValidTransaction { - priority: PRIORITY, - requires: vec![], - provides: vec![("claims", signer).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - } - } -} - -/// Converts the given binary data into ASCII-encoded hex. It will be twice the length. -fn to_ascii_hex(data: &[u8]) -> Vec { - let mut r = Vec::with_capacity(data.len() * 2); - let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n }); - for &b in data.iter() { - push_nibble(b / 16); - push_nibble(b % 16); - } - r -} - -impl Pallet { - // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. 
- fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec { - let prefix = T::Prefix::get(); - let mut l = prefix.len() + what.len() + extra.len(); - let mut rev = Vec::new(); - while l > 0 { - rev.push(b'0' + (l % 10) as u8); - l /= 10; - } - let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); - v.extend(rev.into_iter().rev()); - v.extend_from_slice(prefix); - v.extend_from_slice(what); - v.extend_from_slice(extra); - v - } - - // Attempts to recover the Ethereum address from a message signature signed by using - // the Ethereum RPC's `personal_sign` and `eth_sign`. - fn eth_recover(s: &EcdsaSignature, what: &[u8], extra: &[u8]) -> Option { - let msg = keccak_256(&Self::ethereum_signable_message(what, extra)); - let mut res = EthereumAddress::default(); - res.0 - .copy_from_slice(&keccak_256(&secp256k1_ecdsa_recover(&s.0, &msg).ok()?[..])[12..]); - Some(res) - } - - fn process_claim(signer: EthereumAddress, dest: T::AccountId) -> sp_runtime::DispatchResult { - let balance_due = Claims::::get(&signer).ok_or(Error::::SignerHasNoClaim)?; - - let new_total = - Total::::get().checked_sub(&balance_due).ok_or(Error::::PotUnderflow)?; - - let vesting = Vesting::::get(&signer); - if vesting.is_some() && T::VestingSchedule::vesting_balance(&dest).is_some() { - return Err(Error::::VestedBalanceExists.into()) - } - - // We first need to deposit the balance to ensure that the account exists. - let _ = CurrencyOf::::deposit_creating(&dest, balance_due); - - // Check if this claim should have a vesting schedule. - if let Some(vs) = vesting { - // This can only fail if the account already has a vesting schedule, - // but this is checked above. 
- T::VestingSchedule::add_vesting_schedule(&dest, vs.0, vs.1, vs.2) - .expect("No other vesting schedule exists, as checked above; qed"); - } - - Total::::put(new_total); - Claims::::remove(&signer); - Vesting::::remove(&signer); - Signing::::remove(&signer); - - // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(Event::::Claimed { - who: dest, - ethereum_address: signer, - amount: balance_due, - }); - - Ok(()) - } -} - -/// Validate `attest` calls prior to execution. Needed to avoid a DoS attack since they are -/// otherwise free to place on chain. -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct PrevalidateAttests(core::marker::PhantomData); - -impl Debug for PrevalidateAttests -where - ::RuntimeCall: IsSubType>, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "PrevalidateAttests") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl PrevalidateAttests -where - ::RuntimeCall: IsSubType>, -{ - /// Create new `TransactionExtension` to check runtime version. - pub fn new() -> Self { - Self(core::marker::PhantomData) - } -} - -impl TransactionExtension for PrevalidateAttests -where - ::RuntimeCall: IsSubType>, - <::RuntimeCall as Dispatchable>::RuntimeOrigin: - AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, -{ - const IDENTIFIER: &'static str = "PrevalidateAttests"; - type Implicit = (); - type Pre = (); - type Val = (); - - fn weight(&self, call: &T::RuntimeCall) -> Weight { - if let Some(Call::attest { .. 
}) = call.is_sub_type() { - T::WeightInfo::prevalidate_attests() - } else { - Weight::zero() - } - } - - fn validate( - &self, - origin: ::RuntimeOrigin, - call: &T::RuntimeCall, - _info: &DispatchInfoOf, - _len: usize, - _self_implicit: Self::Implicit, - _inherited_implication: &impl Encode, - _source: TransactionSource, - ) -> Result< - (ValidTransaction, Self::Val, ::RuntimeOrigin), - TransactionValidityError, - > { - if let Some(Call::attest { statement: attested_statement }) = call.is_sub_type() { - let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; - let signer = Preclaims::::get(who) - .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; - if let Some(s) = Signing::::get(signer) { - let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); - ensure!(&attested_statement[..] == s.to_text(), e); - } - } - Ok((ValidTransaction::default(), (), origin)) - } - - impl_tx_ext_default!(T::RuntimeCall; prepare); -} - -#[cfg(any(test, feature = "runtime-benchmarks"))] -mod secp_utils { - use super::*; - - pub fn public(secret: &libsecp256k1::SecretKey) -> libsecp256k1::PublicKey { - libsecp256k1::PublicKey::from_secret_key(secret) - } - pub fn eth(secret: &libsecp256k1::SecretKey) -> EthereumAddress { - let mut res = EthereumAddress::default(); - res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); - res - } - pub fn sig( - secret: &libsecp256k1::SecretKey, - what: &[u8], - extra: &[u8], - ) -> EcdsaSignature { - let msg = keccak_256(&super::Pallet::::ethereum_signable_message( - &to_ascii_hex(what)[..], - extra, - )); - let (sig, recovery_id) = libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), secret); - let mut r = [0u8; 65]; - r[0..64].copy_from_slice(&sig.serialize()[..]); - r[64] = recovery_id.serialize(); - EcdsaSignature(r) - } -} - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; diff --git 
a/polkadot/runtime/common/src/claims/tests.rs b/polkadot/runtime/common/src/claims/tests.rs deleted file mode 100644 index dff2623cb934..000000000000 --- a/polkadot/runtime/common/src/claims/tests.rs +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Tests for the claims pallet. - -#[cfg(test)] -use super::*; -use crate::{claims, claims::mock::*}; -use claims::Call as ClaimsCall; -use hex_literal::hex; -use secp_utils::*; -use sp_runtime::transaction_validity::TransactionSource::External; - -use codec::Encode; -// The testing primitives are very useful for avoiding having to work with signatures -// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
-use frame_support::{ - assert_err, assert_noop, assert_ok, - dispatch::{GetDispatchInfo, Pays}, - traits::ExistenceRequirement, -}; -use sp_runtime::{ - traits::DispatchTransaction, transaction_validity::TransactionLongevity, - DispatchError::BadOrigin, TokenError, -}; - -#[test] -fn basic_setup_works() { - new_test_ext().execute_with(|| { - assert_eq!(claims::Total::::get(), total_claims()); - assert_eq!(claims::Claims::::get(ð(&alice())), Some(100)); - assert_eq!(claims::Claims::::get(ð(&dave())), Some(200)); - assert_eq!(claims::Claims::::get(ð(&eve())), Some(300)); - assert_eq!(claims::Claims::::get(ð(&frank())), Some(400)); - assert_eq!(claims::Claims::::get(&EthereumAddress::default()), None); - assert_eq!(claims::Vesting::::get(ð(&alice())), Some((50, 10, 1))); - }); -} - -#[test] -fn serde_works() { - let x = EthereumAddress(hex!["0123456789abcdef0123456789abcdef01234567"]); - let y = serde_json::to_string(&x).unwrap(); - assert_eq!(y, "\"0x0123456789abcdef0123456789abcdef01234567\""); - let z: EthereumAddress = serde_json::from_str(&y).unwrap(); - assert_eq!(x, z); -} - -#[test] -fn claiming_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&42), 100); - assert_eq!(claims::mock::Vesting::vesting_balance(&42), Some(50)); - assert_eq!(claims::Total::::get(), total_claims() - 100); - }); -} - -#[test] -fn basic_claim_moving_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::move_claim( - RuntimeOrigin::signed(1), - eth(&alice()), - eth(&bob()), - None - ), - BadOrigin - ); - assert_ok!(claims::mock::Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&alice()), - eth(&bob()), - None - )); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), 
&42u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&bob(), &42u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&42), 100); - assert_eq!(claims::mock::Vesting::vesting_balance(&42), Some(50)); - assert_eq!(claims::Total::::get(), total_claims() - 100); - }); -} - -#[test] -fn claim_attest_moving_works() { - new_test_ext().execute_with(|| { - assert_ok!(claims::mock::Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&dave()), - eth(&bob()), - None - )); - let s = sig::(&bob(), &42u64.encode(), StatementKind::Regular.to_text()); - assert_ok!(claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 200); - }); -} - -#[test] -fn attest_moving_works() { - new_test_ext().execute_with(|| { - assert_ok!(claims::mock::Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&eve()), - eth(&bob()), - Some(42) - )); - assert_ok!(claims::mock::Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 300); - }); -} - -#[test] -fn claiming_does_not_bypass_signing() { - new_test_ext().execute_with(|| { - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&dave(), &42u64.encode(), &[][..]) - ), - Error::::InvalidStatement, - ); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&eve(), &42u64.encode(), &[][..]) - ), - Error::::InvalidStatement, - ); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&frank(), &42u64.encode(), &[][..]) - )); - }); -} - -#[test] -fn attest_claiming_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - let s 
= sig::(&dave(), &42u64.encode(), StatementKind::Saft.to_text()); - let r = claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s.clone(), - StatementKind::Saft.to_text().to_vec(), - ); - assert_noop!(r, Error::::InvalidStatement); - - let r = claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec(), - ); - assert_noop!(r, Error::::SignerHasNoClaim); - // ^^^ we use ecdsa_recover, so an invalid signature just results in a random signer id - // being recovered, which realistically will never have a claim. - - let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); - assert_ok!(claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 200); - assert_eq!(claims::Total::::get(), total_claims() - 200); - - let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); - let r = claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec(), - ); - assert_noop!(r, Error::::SignerHasNoClaim); - }); -} - -#[test] -fn attesting_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::attest( - RuntimeOrigin::signed(69), - StatementKind::Saft.to_text().to_vec() - ), - Error::::SenderHasNoClaim - ); - assert_noop!( - claims::mock::Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Regular.to_text().to_vec() - ), - Error::::InvalidStatement - ); - assert_ok!(claims::mock::Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 300); - assert_eq!(claims::Total::::get(), total_claims() - 300); - }); -} - -#[test] -fn claim_cannot_clobber_preclaim() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - // Alice's claim is 100 - 
assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&42), 100); - // Eve's claim is 300 through Account 42 - assert_ok!(claims::mock::Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 100 + 300); - assert_eq!(claims::Total::::get(), total_claims() - 400); - }); -} - -#[test] -fn valid_attest_transactions_are_free() { - new_test_ext().execute_with(|| { - let p = PrevalidateAttests::::new(); - let c = claims::mock::RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Saft.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - assert_eq!(di.pays_fee, Pays::No); - let r = p.validate_only(Some(42).into(), &c, &di, 20, External, 0); - assert_eq!(r.unwrap().0, ValidTransaction::default()); - }); -} - -#[test] -fn invalid_attest_transactions_are_recognized() { - new_test_ext().execute_with(|| { - let p = PrevalidateAttests::::new(); - let c = claims::mock::RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Regular.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - let r = p.validate_only(Some(42).into(), &c, &di, 20, External, 0); - assert!(r.is_err()); - let c = claims::mock::RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Saft.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - let r = p.validate_only(Some(69).into(), &c, &di, 20, External, 0); - assert!(r.is_err()); - }); -} - -#[test] -fn cannot_bypass_attest_claiming() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - let s = sig::(&dave(), &42u64.encode(), &[]); - let r = claims::mock::Claims::claim(RuntimeOrigin::none(), 42, s.clone()); - assert_noop!(r, Error::::InvalidStatement); - }); -} - -#[test] -fn add_claim_works() { - new_test_ext().execute_with(|| { - assert_noop!( - claims::mock::Claims::mint_claim( - 
RuntimeOrigin::signed(42), - eth(&bob()), - 200, - None, - None - ), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim, - ); - assert_ok!(claims::mock::Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - None, - None - )); - assert_eq!(claims::Total::::get(), total_claims() + 200); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&69), 200); - assert_eq!(claims::mock::Vesting::vesting_balance(&69), None); - assert_eq!(claims::Total::::get(), total_claims()); - }); -} - -#[test] -fn add_claim_with_vesting_works() { - new_test_ext().execute_with(|| { - assert_noop!( - claims::mock::Claims::mint_claim( - RuntimeOrigin::signed(42), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - ), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim, - ); - assert_ok!(claims::mock::Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - )); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&69), 200); - assert_eq!(claims::mock::Vesting::vesting_balance(&69), Some(50)); - - // Make sure we can not transfer the vested balance. 
- assert_err!( - >::transfer(&69, &80, 180, ExistenceRequirement::AllowDeath), - TokenError::Frozen, - ); - }); -} - -#[test] -fn add_claim_with_statement_works() { - new_test_ext().execute_with(|| { - assert_noop!( - claims::mock::Claims::mint_claim( - RuntimeOrigin::signed(42), - eth(&bob()), - 200, - None, - Some(StatementKind::Regular) - ), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - let signature = sig::(&bob(), &69u64.encode(), StatementKind::Regular.to_text()); - assert_noop!( - claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 69, - signature.clone(), - StatementKind::Regular.to_text().to_vec() - ), - Error::::SignerHasNoClaim - ); - assert_ok!(claims::mock::Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - None, - Some(StatementKind::Regular) - )); - assert_noop!( - claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 69, - signature.clone(), - vec![], - ), - Error::::SignerHasNoClaim - ); - assert_ok!(claims::mock::Claims::claim_attest( - RuntimeOrigin::none(), - 69, - signature.clone(), - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&69), 200); - }); -} - -#[test] -fn origin_signed_claiming_fail() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_err!( - claims::mock::Claims::claim( - RuntimeOrigin::signed(42), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - ), - sp_runtime::traits::BadOrigin, - ); - }); -} - -#[test] -fn double_claiming_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_ok!(claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); -} - -#[test] -fn claiming_while_vested_doesnt_work() { - 
new_test_ext().execute_with(|| { - CurrencyOf::::make_free_balance_be(&69, total_claims()); - assert_eq!(Balances::free_balance(69), total_claims()); - // A user is already vested - assert_ok!(::VestingSchedule::add_vesting_schedule( - &69, - total_claims(), - 100, - 10 - )); - assert_ok!(claims::mock::Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - )); - // New total - assert_eq!(claims::Total::::get(), total_claims() + 200); - - // They should not be able to claim - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::VestedBalanceExists, - ); - }); -} - -#[test] -fn non_sender_sig_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); -} - -#[test] -fn non_claimant_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - claims::mock::Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); -} - -#[test] -fn real_eth_sig_works() { - new_test_ext().execute_with(|| { - // "Pay RUSTs to the TEST account:2a00000000000000" - let sig = hex!["444023e89b67e67c0562ed0305d252a5dd12b2af5ac51d6d3cb69a0b486bc4b3191401802dc29d26d586221f7256cd3329fe82174bdf659baea149a40e1c495d1c"]; - let sig = EcdsaSignature(sig); - let who = 42u64.using_encoded(to_ascii_hex); - let signer = claims::mock::Claims::eth_recover(&sig, &who, &[][..]).unwrap(); - assert_eq!(signer.0, hex!["6d31165d5d932d571f3b44695653b46dcc327e84"]); - }); -} - -#[test] -fn validate_unsigned_works() { - use sp_runtime::traits::ValidateUnsigned; - let source = sp_runtime::transaction_validity::TransactionSource::External; - - new_test_ext().execute_with(|| 
{ - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { - dest: 1, - ethereum_signature: sig::(&alice(), &1u64.encode(), &[][..]) - } - ), - Ok(ValidTransaction { - priority: 100, - requires: vec![], - provides: vec![("claims", eth(&alice())).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { dest: 0, ethereum_signature: EcdsaSignature([0; 65]) } - ), - InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { - dest: 1, - ethereum_signature: sig::(&bob(), &1u64.encode(), &[][..]) - } - ), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - let s = sig::(&dave(), &1u64.encode(), StatementKind::Regular.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - Ok(ValidTransaction { - priority: 100, - requires: vec![], - provides: vec![("claims", eth(&dave())).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: EcdsaSignature([0; 65]), - statement: StatementKind::Regular.to_text().to_vec() - } - ), - InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), - ); - - let s = sig::(&bob(), &1u64.encode(), StatementKind::Regular.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - - let s = sig::(&dave(), &1u64.encode(), 
StatementKind::Saft.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - - let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Saft.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::InvalidStatement.into()).into(), - ); - }); -} diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs index e3835b692526..126c886280e6 100644 --- a/polkadot/runtime/common/src/identity_migrator.rs +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -160,22 +160,12 @@ pub trait OnReapIdentity { /// - `bytes`: The byte size of `IdentityInfo`. /// - `subs`: The number of sub-accounts they had. fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; - - /// Ensure that identity reaping will be succesful in benchmarking. - /// - /// Should setup the state in a way that the same call ot `[Self::on_reap_identity]` will be - /// successful. 
- #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_identity_reaping(who: &AccountId, bytes: u32, subs: u32); } impl OnReapIdentity for () { fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { Ok(()) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) {} } #[cfg(feature = "runtime-benchmarks")] @@ -229,12 +219,6 @@ mod benchmarks { } Identity::::set_subs(target_origin.clone(), subs.clone())?; - T::ReapIdentityHandler::ensure_successful_identity_reaping( - &target, - info.encoded_size() as u32, - subs.len() as u32, - ); - // add registrars and provide judgements let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); diff --git a/polkadot/runtime/common/src/paras_registrar/benchmarking.rs b/polkadot/runtime/common/src/paras_registrar/benchmarking.rs deleted file mode 100644 index 95df8a969576..000000000000 --- a/polkadot/runtime/common/src/paras_registrar/benchmarking.rs +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! 
Benchmarking for paras_registrar pallet - -#[cfg(feature = "runtime-benchmarks")] -use super::{Pallet as Registrar, *}; -use crate::traits::Registrar as RegistrarT; -use frame_support::assert_ok; -use frame_system::RawOrigin; -use polkadot_primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; -use polkadot_runtime_parachains::{paras, shared, Origin as ParaOrigin}; -use sp_runtime::traits::Bounded; - -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; - -fn assert_last_event(generic_event: ::RuntimeEvent) { - let events = frame_system::Pallet::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); - // compare to the last event record - let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; - assert_eq!(event, &system_event); -} - -fn register_para(id: u32) -> ParaId { - let para = ParaId::from(id); - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); - assert_ok!(Registrar::::register( - RawOrigin::Signed(caller).into(), - para, - genesis_head, - validation_code.clone() - )); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - validation_code, - )); - return para -} - -fn para_origin(id: u32) -> ParaOrigin { - ParaOrigin::Parachain(id.into()) -} - -// This function moves forward to the next scheduled session for parachain lifecycle upgrades. -fn next_scheduled_session() { - shared::Pallet::::set_session_index(shared::Pallet::::scheduled_session()); - paras::Pallet::::test_on_new_session(); -} - -benchmarks! 
{ - where_clause { where ParaOrigin: Into<::RuntimeOrigin> } - - reserve { - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - }: _(RawOrigin::Signed(caller.clone())) - verify { - assert_last_event::(Event::::Reserved { para_id: LOWEST_PUBLIC_ID, who: caller }.into()); - assert!(Paras::::get(LOWEST_PUBLIC_ID).is_some()); - assert_eq!(paras::Pallet::::lifecycle(LOWEST_PUBLIC_ID), None); - } - - register { - let para = LOWEST_PUBLIC_ID; - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); - }: _(RawOrigin::Signed(caller.clone()), para, genesis_head, validation_code.clone()) - verify { - assert_last_event::(Event::::Registered{ para_id: para, manager: caller }.into()); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - validation_code, - )); - next_scheduled_session::(); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); - } - - force_register { - let manager: T::AccountId = account("manager", 0, 0); - let deposit = 0u32.into(); - let para = ParaId::from(69); - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - }: _(RawOrigin::Root, manager.clone(), deposit, para, genesis_head, validation_code.clone()) - verify { - assert_last_event::(Event::::Registered { para_id: para, manager }.into()); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - 
frame_system::Origin::::Root.into(), - validation_code, - )); - next_scheduled_session::(); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); - } - - deregister { - let para = register_para::(LOWEST_PUBLIC_ID.into()); - next_scheduled_session::(); - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), para) - verify { - assert_last_event::(Event::::Deregistered { para_id: para }.into()); - } - - swap { - // On demand parachain - let parathread = register_para::(LOWEST_PUBLIC_ID.into()); - let parachain = register_para::((LOWEST_PUBLIC_ID + 1).into()); - - let parachain_origin = para_origin(parachain.into()); - - // Actually finish registration process - next_scheduled_session::(); - - // Upgrade the parachain - Registrar::::make_parachain(parachain)?; - next_scheduled_session::(); - - assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parachain)); - assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parathread)); - - let caller: T::AccountId = whitelisted_caller(); - Registrar::::swap(parachain_origin.into(), parachain, parathread)?; - }: _(RawOrigin::Signed(caller.clone()), parathread, parachain) - verify { - next_scheduled_session::(); - // Swapped! - assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parathread)); - assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); - } - - schedule_code_upgrade { - let b in MIN_CODE_SIZE .. MAX_CODE_SIZE; - let new_code = ValidationCode(vec![0; b as usize]); - let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_code) - - set_current_head { - let b in 1 .. 
MAX_HEAD_DATA_SIZE; - let new_head = HeadData(vec![0; b as usize]); - let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_head) - - impl_benchmark_test_suite!( - Registrar, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); -} diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs deleted file mode 100644 index 1627fd70365d..000000000000 --- a/polkadot/runtime/common/src/paras_registrar/mock.rs +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Mocking utilities for testing in paras_registrar pallet. 
- -#[cfg(test)] -use super::*; -use crate::paras_registrar; -use alloc::collections::btree_map::BTreeMap; -use frame_support::{ - derive_impl, parameter_types, - traits::{OnFinalize, OnInitialize}, -}; -use frame_system::limits; -use polkadot_primitives::{Balance, BlockNumber, MAX_CODE_SIZE}; -use polkadot_runtime_parachains::{configuration, origin, shared}; -use sp_core::H256; -use sp_io::TestExternalities; -use sp_keyring::Sr25519Keyring; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - transaction_validity::TransactionPriority, - BuildStorage, Perbill, -}; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlockU32; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Configuration: configuration, - Parachains: paras, - ParasShared: shared, - Registrar: paras_registrar, - ParachainsOrigin: origin, - } -); - -impl frame_system::offchain::CreateTransactionBase for Test -where - RuntimeCall: From, -{ - type Extrinsic = UncheckedExtrinsic; - type RuntimeCall = RuntimeCall; -} - -impl frame_system::offchain::CreateInherent for Test -where - RuntimeCall: From, -{ - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { - UncheckedExtrinsic::new_bare(call) - } -} - -const NORMAL_RATIO: Perbill = Perbill::from_percent(75); -parameter_types! 
{ - pub BlockWeights: limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type DbWeight = (); - type BlockWeights = BlockWeights; - type BlockLength = BlockLength; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! { - pub const ExistentialDeposit: Balance = 1; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type Balance = Balance; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; -} - -impl shared::Config for Test { - type DisabledValidators = (); -} - -impl origin::Config for Test {} - -parameter_types! { - pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl paras::Config for Test { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = paras::TestWeightInfo; - type UnsignedPriority = ParasUnsignedPriority; - type QueueFootprinter = (); - type NextSessionRotation = crate::mock::TestNextSessionRotation; - type OnNewHead = (); - type AssignCoretime = (); -} - -impl configuration::Config for Test { - type WeightInfo = configuration::TestWeightInfo; -} - -parameter_types! 
{ - pub const ParaDeposit: Balance = 10; - pub const DataDepositPerByte: Balance = 1; - pub const MaxRetries: u32 = 3; -} - -impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type OnSwap = MockSwap; - type ParaDeposit = ParaDeposit; - type DataDepositPerByte = DataDepositPerByte; - type WeightInfo = TestWeightInfo; -} - -pub fn new_test_ext() -> TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - configuration::GenesisConfig:: { - config: configuration::HostConfiguration { - max_code_size: MAX_CODE_SIZE, - max_head_data_size: 1 * 1024 * 1024, // 1 MB - ..Default::default() - }, - } - .assimilate_storage(&mut t) - .unwrap(); - - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10_000_000), (2, 10_000_000), (3, 10_000_000)], - } - .assimilate_storage(&mut t) - .unwrap(); - - t.into() -} - -parameter_types! { - pub static SwapData: BTreeMap = BTreeMap::new(); -} - -pub struct MockSwap; -impl OnSwap for MockSwap { - fn on_swap(one: ParaId, other: ParaId) { - let mut swap_data = SwapData::get(); - let one_data = swap_data.remove(&one).unwrap_or_default(); - let other_data = swap_data.remove(&other).unwrap_or_default(); - swap_data.insert(one, other_data); - swap_data.insert(other, one_data); - SwapData::set(swap_data); - } -} - -pub const BLOCKS_PER_SESSION: u32 = 3; - -pub const VALIDATORS: &[Sr25519Keyring] = &[ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Ferdie, -]; - -pub fn run_to_block(n: BlockNumber) { - // NOTE that this function only simulates modules of interest. Depending on new pallet may - // require adding it here. - assert!(System::block_number() < n); - while System::block_number() < n { - let b = System::block_number(); - - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - // Session change every 3 blocks. 
- if (b + 1) % BLOCKS_PER_SESSION == 0 { - let session_index = shared::CurrentSessionIndex::::get() + 1; - let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); - - shared::Pallet::::set_session_index(session_index); - shared::Pallet::::set_active_validators_ascending(validators_pub_keys); - - Parachains::test_on_new_session(); - } - System::set_block_number(b + 1); - System::on_initialize(System::block_number()); - } -} - -pub fn run_to_session(n: BlockNumber) { - let block_number = n * BLOCKS_PER_SESSION; - run_to_block(block_number); -} - -pub fn test_genesis_head(size: usize) -> HeadData { - HeadData(vec![0u8; size]) -} - -pub fn test_validation_code(size: usize) -> ValidationCode { - let validation_code = vec![0u8; size as usize]; - ValidationCode(validation_code) -} - -pub fn para_origin(id: ParaId) -> RuntimeOrigin { - polkadot_runtime_parachains::Origin::Parachain(id).into() -} - -pub fn max_code_size() -> u32 { - configuration::ActiveConfig::::get().max_code_size -} - -pub fn max_head_size() -> u32 { - configuration::ActiveConfig::::get().max_head_data_size -} diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index aed0729c9d51..2ead621dedf0 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -561,16 +561,15 @@ impl Pallet { origin: ::RuntimeOrigin, id: ParaId, ) -> DispatchResult { - if let Ok(who) = ensure_signed(origin.clone()) { - let para_info = Paras::::get(id).ok_or(Error::::NotRegistered)?; - - if para_info.manager == who { + ensure_signed(origin.clone()) + .map_err(|e| e.into()) + .and_then(|who| -> DispatchResult { + let para_info = Paras::::get(id).ok_or(Error::::NotRegistered)?; ensure!(!para_info.is_locked(), Error::::ParaLocked); - return Ok(()) - } - } - - Self::ensure_root_or_para(origin, id) + ensure!(para_info.manager == who, Error::::NotOwner); + Ok(()) + }) + .or_else(|_| 
-> DispatchResult { Self::ensure_root_or_para(origin, id) }) } /// Ensure the origin is one of Root or the `para` itself. @@ -578,14 +577,14 @@ impl Pallet { origin: ::RuntimeOrigin, id: ParaId, ) -> DispatchResult { - if ensure_root(origin.clone()).is_ok() { - return Ok(()) + if let Ok(caller_id) = ensure_parachain(::RuntimeOrigin::from(origin.clone())) + { + // Check if matching para id... + ensure!(caller_id == id, Error::::NotOwner); + } else { + // Check if root... + ensure_root(origin.clone())?; } - - let caller_id = ensure_parachain(::RuntimeOrigin::from(origin))?; - // Check if matching para id... - ensure!(caller_id == id, Error::::NotOwner); - Ok(()) } @@ -714,10 +713,967 @@ impl OnNewHead for Pallet { } #[cfg(test)] -mod mock; +mod tests { + use super::*; + use crate::{ + mock::conclude_pvf_checking, paras_registrar, traits::Registrar as RegistrarTrait, + }; + use alloc::collections::btree_map::BTreeMap; + use frame_support::{ + assert_noop, assert_ok, derive_impl, parameter_types, + traits::{OnFinalize, OnInitialize}, + }; + use frame_system::limits; + use pallet_balances::Error as BalancesError; + use polkadot_primitives::{Balance, BlockNumber, SessionIndex, MAX_CODE_SIZE}; + use polkadot_runtime_parachains::{configuration, origin, shared}; + use sp_core::H256; + use sp_io::TestExternalities; + use sp_keyring::Sr25519Keyring; + use sp_runtime::{ + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + transaction_validity::TransactionPriority, + BuildStorage, Perbill, + }; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlockU32; + + frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Configuration: configuration, + Parachains: paras, + ParasShared: shared, + Registrar: paras_registrar, + ParachainsOrigin: origin, + } + ); + + impl frame_system::offchain::CreateTransactionBase for Test + where + RuntimeCall: From, + { + type 
Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; + } -#[cfg(test)] -mod tests; + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } + } + + const NORMAL_RATIO: Perbill = Perbill::from_percent(75); + parameter_types! { + pub BlockWeights: limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type DbWeight = (); + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + } + + parameter_types! { + pub const ExistentialDeposit: Balance = 1; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Test { + type Balance = Balance; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + + impl shared::Config for Test { + type DisabledValidators = (); + } + + impl origin::Config for Test {} + + parameter_types! 
{ + pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); + } + + impl paras::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = paras::TestWeightInfo; + type UnsignedPriority = ParasUnsignedPriority; + type QueueFootprinter = (); + type NextSessionRotation = crate::mock::TestNextSessionRotation; + type OnNewHead = (); + type AssignCoretime = (); + } + + impl configuration::Config for Test { + type WeightInfo = configuration::TestWeightInfo; + } + + parameter_types! { + pub const ParaDeposit: Balance = 10; + pub const DataDepositPerByte: Balance = 1; + pub const MaxRetries: u32 = 3; + } + + impl Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type OnSwap = MockSwap; + type ParaDeposit = ParaDeposit; + type DataDepositPerByte = DataDepositPerByte; + type WeightInfo = TestWeightInfo; + } + + pub fn new_test_ext() -> TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + configuration::GenesisConfig:: { + config: configuration::HostConfiguration { + max_code_size: MAX_CODE_SIZE, + max_head_data_size: 1 * 1024 * 1024, // 1 MB + ..Default::default() + }, + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10_000_000), (2, 10_000_000), (3, 10_000_000)], + } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() + } + + parameter_types! 
{ + pub static SwapData: BTreeMap = BTreeMap::new(); + } + + pub struct MockSwap; + impl OnSwap for MockSwap { + fn on_swap(one: ParaId, other: ParaId) { + let mut swap_data = SwapData::get(); + let one_data = swap_data.remove(&one).unwrap_or_default(); + let other_data = swap_data.remove(&other).unwrap_or_default(); + swap_data.insert(one, other_data); + swap_data.insert(other, one_data); + SwapData::set(swap_data); + } + } + + const BLOCKS_PER_SESSION: u32 = 3; + + const VALIDATORS: &[Sr25519Keyring] = &[ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + + fn run_to_block(n: BlockNumber) { + // NOTE that this function only simulates modules of interest. Depending on new pallet may + // require adding it here. + assert!(System::block_number() < n); + while System::block_number() < n { + let b = System::block_number(); + + if System::block_number() > 1 { + System::on_finalize(System::block_number()); + } + // Session change every 3 blocks. 
+ if (b + 1) % BLOCKS_PER_SESSION == 0 { + let session_index = shared::CurrentSessionIndex::::get() + 1; + let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); + + shared::Pallet::::set_session_index(session_index); + shared::Pallet::::set_active_validators_ascending(validators_pub_keys); + + Parachains::test_on_new_session(); + } + System::set_block_number(b + 1); + System::on_initialize(System::block_number()); + } + } + + fn run_to_session(n: BlockNumber) { + let block_number = n * BLOCKS_PER_SESSION; + run_to_block(block_number); + } + + fn test_genesis_head(size: usize) -> HeadData { + HeadData(vec![0u8; size]) + } + + fn test_validation_code(size: usize) -> ValidationCode { + let validation_code = vec![0u8; size as usize]; + ValidationCode(validation_code) + } + + fn para_origin(id: ParaId) -> RuntimeOrigin { + polkadot_runtime_parachains::Origin::Parachain(id).into() + } + + fn max_code_size() -> u32 { + configuration::ActiveConfig::::get().max_code_size + } + + fn max_head_size() -> u32 { + configuration::ActiveConfig::::get().max_head_data_size + } + + #[test] + fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(PendingSwap::::get(&ParaId::from(0u32)), None); + assert_eq!(Paras::::get(&ParaId::from(0u32)), None); + }); + } + + #[test] + fn end_to_end_scenario_works() { + new_test_ext().execute_with(|| { + let para_id = LOWEST_PUBLIC_ID; + + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + // first para is not yet registered + assert!(!Parachains::is_parathread(para_id)); + // We register the Para ID + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + // It is 
now a parathread (on-demand parachain). + assert!(Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + // Some other external process will elevate on-demand to lease holding parachain + assert_ok!(Registrar::make_parachain(para_id)); + run_to_session(START_SESSION_INDEX + 4); + // It is now a lease holding parachain. + assert!(!Parachains::is_parathread(para_id)); + assert!(Parachains::is_parachain(para_id)); + // Turn it back into a parathread (on-demand parachain) + assert_ok!(Registrar::make_parathread(para_id)); + run_to_session(START_SESSION_INDEX + 6); + assert!(Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + // Deregister it + assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id,)); + run_to_session(START_SESSION_INDEX + 8); + // It is nothing + assert!(!Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + }); + } + + #[test] + fn register_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + // Even though the registered validation code has a smaller size than the maximum the + // para manager's deposit is reserved as though they registered the maximum-sized code. + // Consequently, they can upgrade their code to the maximum size at any point without + // additional cost. 
+ let validation_code_deposit = + max_code_size() as BalanceOf * ::DataDepositPerByte::get(); + let head_deposit = 32 * ::DataDepositPerByte::get(); + assert_eq!( + Balances::reserved_balance(&1), + ::ParaDeposit::get() + head_deposit + validation_code_deposit + ); + }); + } + + #[test] + fn schedule_code_upgrade_validates_code() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + + let new_code = test_validation_code(0); + assert_noop!( + Registrar::schedule_code_upgrade( + RuntimeOrigin::signed(1), + para_id, + new_code.clone(), + ), + paras::Error::::InvalidCode + ); + + let new_code = test_validation_code(max_code_size() as usize + 1); + assert_noop!( + Registrar::schedule_code_upgrade( + RuntimeOrigin::signed(1), + para_id, + new_code.clone(), + ), + paras::Error::::InvalidCode + ); + }); + } + + #[test] + fn register_handles_basic_errors() { + new_test_ext().execute_with(|| { + let para_id = LOWEST_PUBLIC_ID; + + assert_noop!( + Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(max_head_size() as usize), + test_validation_code(max_code_size() as usize), + ), + Error::::NotReserved + ); + + // Successfully register para + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + + assert_noop!( + Registrar::register( + RuntimeOrigin::signed(2), + para_id, + test_genesis_head(max_head_size() as usize), + 
test_validation_code(max_code_size() as usize), + ), + Error::::NotOwner + ); + + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(max_head_size() as usize), + test_validation_code(max_code_size() as usize), + )); + // Can skip pre-check and deregister para which's still onboarding. + run_to_session(2); + + assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id)); + + // Can't do it again + assert_noop!( + Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(max_head_size() as usize), + test_validation_code(max_code_size() as usize), + ), + Error::::NotReserved + ); + + // Head Size Check + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); + assert_noop!( + Registrar::register( + RuntimeOrigin::signed(2), + para_id + 1, + test_genesis_head((max_head_size() + 1) as usize), + test_validation_code(max_code_size() as usize), + ), + Error::::HeadDataTooLarge + ); + + // Code Size Check + assert_noop!( + Registrar::register( + RuntimeOrigin::signed(2), + para_id + 1, + test_genesis_head(max_head_size() as usize), + test_validation_code((max_code_size() + 1) as usize), + ), + Error::::CodeTooLarge + ); + + // Needs enough funds for deposit + assert_noop!( + Registrar::reserve(RuntimeOrigin::signed(1337)), + BalancesError::::InsufficientBalance + ); + }); + } + + #[test] + fn deregister_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + 
assert!(Parachains::is_parathread(para_id)); + assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id,)); + run_to_session(START_SESSION_INDEX + 4); + assert!(paras::Pallet::::lifecycle(para_id).is_none()); + assert_eq!(Balances::reserved_balance(&1), 0); + }); + } + + #[test] + fn deregister_handles_basic_errors() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + // Owner check + assert_noop!(Registrar::deregister(RuntimeOrigin::signed(2), para_id,), BadOrigin); + assert_ok!(Registrar::make_parachain(para_id)); + run_to_session(START_SESSION_INDEX + 4); + // Cant directly deregister parachain + assert_noop!( + Registrar::deregister(RuntimeOrigin::root(), para_id,), + Error::::NotParathread + ); + }); + } + + #[test] + fn swap_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + // Successfully register first two parachains + let para_1 = LOWEST_PUBLIC_ID; + let para_2 = LOWEST_PUBLIC_ID + 1; + + let validation_code = test_validation_code(max_code_size() as usize); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_1, + test_genesis_head(max_head_size() as usize), + validation_code.clone(), + )); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(2), + para_2, + 
test_genesis_head(max_head_size() as usize), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + + // Upgrade para 1 into a parachain + assert_ok!(Registrar::make_parachain(para_1)); + + // Set some mock swap data. + let mut swap_data = SwapData::get(); + swap_data.insert(para_1, 69); + swap_data.insert(para_2, 1337); + SwapData::set(swap_data); + + run_to_session(START_SESSION_INDEX + 4); + + // Roles are as we expect + assert!(Parachains::is_parachain(para_1)); + assert!(!Parachains::is_parathread(para_1)); + assert!(!Parachains::is_parachain(para_2)); + assert!(Parachains::is_parathread(para_2)); + + // Both paras initiate a swap + // Swap between parachain and parathread + assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_2,)); + assert_ok!(Registrar::swap(para_origin(para_2), para_2, para_1,)); + System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { + para_id: para_2, + other_id: para_1, + })); + + run_to_session(START_SESSION_INDEX + 6); + + // Roles are swapped + assert!(!Parachains::is_parachain(para_1)); + assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parachain(para_2)); + assert!(!Parachains::is_parathread(para_2)); + + // Data is swapped + assert_eq!(SwapData::get().get(¶_1).unwrap(), &1337); + assert_eq!(SwapData::get().get(¶_2).unwrap(), &69); + + // Both paras initiate a swap + // Swap between parathread and parachain + assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_2,)); + assert_ok!(Registrar::swap(para_origin(para_2), para_2, para_1,)); + System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { + para_id: para_2, + other_id: para_1, + })); + + // Data is swapped + assert_eq!(SwapData::get().get(¶_1).unwrap(), &69); + assert_eq!(SwapData::get().get(¶_2).unwrap(), &1337); + + // Parachain to parachain swap + let para_3 = LOWEST_PUBLIC_ID + 2; 
+ let validation_code = test_validation_code(max_code_size() as usize); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(3))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(3), + para_3, + test_genesis_head(max_head_size() as usize), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX + 6); + + run_to_session(START_SESSION_INDEX + 8); + + // Upgrade para 3 into a parachain + assert_ok!(Registrar::make_parachain(para_3)); + + // Set some mock swap data. + let mut swap_data = SwapData::get(); + swap_data.insert(para_3, 777); + SwapData::set(swap_data); + + run_to_session(START_SESSION_INDEX + 10); + + // Both are parachains + assert!(Parachains::is_parachain(para_3)); + assert!(!Parachains::is_parathread(para_3)); + assert!(Parachains::is_parachain(para_1)); + assert!(!Parachains::is_parathread(para_1)); + + // Both paras initiate a swap + // Swap between parachain and parachain + assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_3,)); + assert_ok!(Registrar::swap(para_origin(para_3), para_3, para_1,)); + System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { + para_id: para_3, + other_id: para_1, + })); + + // Data is swapped + assert_eq!(SwapData::get().get(¶_3).unwrap(), &69); + assert_eq!(SwapData::get().get(¶_1).unwrap(), &777); + }); + } + + #[test] + fn para_lock_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + let para_id = LOWEST_PUBLIC_ID; + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_id, + vec![1; 3].into(), + test_validation_code(32) + )); + + assert_noop!(Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin); + + // Once they produces new block, we lock them in. 
+ Registrar::on_new_head(para_id, &Default::default()); + + // Owner cannot pass origin check when checking lock + assert_noop!( + Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id), + BadOrigin + ); + // Owner cannot remove lock. + assert_noop!(Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin); + // Para can. + assert_ok!(Registrar::remove_lock(para_origin(para_id), para_id)); + // Owner can pass origin check again + assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); + + // Won't lock again after it is unlocked + Registrar::on_new_head(para_id, &Default::default()); + + assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); + }); + } + + #[test] + fn swap_handles_bad_states() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_1 = LOWEST_PUBLIC_ID; + let para_2 = LOWEST_PUBLIC_ID + 1; + + // paras are not yet registered + assert!(!Parachains::is_parathread(para_1)); + assert!(!Parachains::is_parathread(para_2)); + + // Cannot even start a swap + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_1, para_2), + Error::::NotRegistered + ); + + // We register Paras 1 and 2 + let validation_code = test_validation_code(32); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(1), + para_1, + test_genesis_head(32), + validation_code.clone(), + )); + assert_ok!(Registrar::register( + RuntimeOrigin::signed(2), + para_2, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + 
run_to_session(START_SESSION_INDEX + 2); + + // They are now parathreads (on-demand parachains). + assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parathread(para_2)); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + // Some other external process will elevate one on-demand + // parachain to a lease holding parachain + assert_ok!(Registrar::make_parachain(para_1)); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 3); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 4); + + // It is now a lease holding parachain. + assert!(Parachains::is_parachain(para_1)); + assert!(Parachains::is_parathread(para_2)); + + // Swap works here. + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_2, para_1)); + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) + ))); + + run_to_session(START_SESSION_INDEX + 5); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 6); + + // Swap worked! + assert!(Parachains::is_parachain(para_2)); + assert!(Parachains::is_parathread(para_1)); + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. 
}) + ))); + + // Something starts to downgrade a para + assert_ok!(Registrar::make_parathread(para_2)); + + run_to_session(START_SESSION_INDEX + 7); + + // Cannot swap + assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 8); + + assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parathread(para_2)); + }); + } +} #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +mod benchmarking { + use super::{Pallet as Registrar, *}; + use crate::traits::Registrar as RegistrarT; + use frame_support::assert_ok; + use frame_system::RawOrigin; + use polkadot_primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; + use polkadot_runtime_parachains::{paras, shared, Origin as ParaOrigin}; + use sp_runtime::traits::Bounded; + + use frame_benchmarking::{account, benchmarks, whitelisted_caller}; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + fn register_para(id: u32) -> ParaId { + let para = ParaId::from(id); + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); + assert_ok!(Registrar::::register( + RawOrigin::Signed(caller).into(), + para, + genesis_head, + validation_code.clone() + )); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + validation_code, + )); + return para + } + + fn para_origin(id: u32) -> ParaOrigin { + ParaOrigin::Parachain(id.into()) + } + + // This function moves forward to the next scheduled session for parachain lifecycle upgrades. + fn next_scheduled_session() { + shared::Pallet::::set_session_index(shared::Pallet::::scheduled_session()); + paras::Pallet::::test_on_new_session(); + } + + benchmarks! 
{ + where_clause { where ParaOrigin: Into<::RuntimeOrigin> } + + reserve { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(RawOrigin::Signed(caller.clone())) + verify { + assert_last_event::(Event::::Reserved { para_id: LOWEST_PUBLIC_ID, who: caller }.into()); + assert!(Paras::::get(LOWEST_PUBLIC_ID).is_some()); + assert_eq!(paras::Pallet::::lifecycle(LOWEST_PUBLIC_ID), None); + } + + register { + let para = LOWEST_PUBLIC_ID; + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); + }: _(RawOrigin::Signed(caller.clone()), para, genesis_head, validation_code.clone()) + verify { + assert_last_event::(Event::::Registered{ para_id: para, manager: caller }.into()); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + validation_code, + )); + next_scheduled_session::(); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); + } + + force_register { + let manager: T::AccountId = account("manager", 0, 0); + let deposit = 0u32.into(); + let para = ParaId::from(69); + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + }: _(RawOrigin::Root, manager.clone(), deposit, para, genesis_head, validation_code.clone()) + verify { + assert_last_event::(Event::::Registered { para_id: para, manager }.into()); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + 
frame_system::Origin::::Root.into(), + validation_code, + )); + next_scheduled_session::(); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); + } + + deregister { + let para = register_para::(LOWEST_PUBLIC_ID.into()); + next_scheduled_session::(); + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), para) + verify { + assert_last_event::(Event::::Deregistered { para_id: para }.into()); + } + + swap { + // On demand parachain + let parathread = register_para::(LOWEST_PUBLIC_ID.into()); + let parachain = register_para::((LOWEST_PUBLIC_ID + 1).into()); + + let parachain_origin = para_origin(parachain.into()); + + // Actually finish registration process + next_scheduled_session::(); + + // Upgrade the parachain + Registrar::::make_parachain(parachain)?; + next_scheduled_session::(); + + assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parachain)); + assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parathread)); + + let caller: T::AccountId = whitelisted_caller(); + Registrar::::swap(parachain_origin.into(), parachain, parathread)?; + }: _(RawOrigin::Signed(caller.clone()), parathread, parachain) + verify { + next_scheduled_session::(); + // Swapped! + assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parathread)); + assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); + } + + schedule_code_upgrade { + let b in MIN_CODE_SIZE .. MAX_CODE_SIZE; + let new_code = ValidationCode(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_code) + + set_current_head { + let b in 1 .. 
MAX_HEAD_DATA_SIZE; + let new_head = HeadData(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_head) + + impl_benchmark_test_suite!( + Registrar, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); + } +} diff --git a/polkadot/runtime/common/src/paras_registrar/tests.rs b/polkadot/runtime/common/src/paras_registrar/tests.rs deleted file mode 100644 index 66fef31c9afd..000000000000 --- a/polkadot/runtime/common/src/paras_registrar/tests.rs +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Tests for the paras_registrar pallet. 
- -#[cfg(test)] -use super::*; -use crate::{ - mock::conclude_pvf_checking, paras_registrar, paras_registrar::mock::*, - traits::Registrar as RegistrarTrait, -}; -use frame_support::{assert_noop, assert_ok}; -use pallet_balances::Error as BalancesError; -use polkadot_primitives::SessionIndex; -use sp_runtime::traits::BadOrigin; - -#[test] -fn end_to_end_scenario_works() { - new_test_ext().execute_with(|| { - let para_id = LOWEST_PUBLIC_ID; - - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - // first para is not yet registered - assert!(!Parachains::is_parathread(para_id)); - // We register the Para ID - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - // It is now a parathread (on-demand parachain). - assert!(Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - // Some other external process will elevate on-demand to lease holding parachain - assert_ok!(mock::Registrar::make_parachain(para_id)); - run_to_session(START_SESSION_INDEX + 4); - // It is now a lease holding parachain. 
- assert!(!Parachains::is_parathread(para_id)); - assert!(Parachains::is_parachain(para_id)); - // Turn it back into a parathread (on-demand parachain) - assert_ok!(mock::Registrar::make_parathread(para_id)); - run_to_session(START_SESSION_INDEX + 6); - assert!(Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - // Deregister it - assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id,)); - run_to_session(START_SESSION_INDEX + 8); - // It is nothing - assert!(!Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - }); -} - -#[test] -fn register_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - // Even though the registered validation code has a smaller size than the maximum the - // para manager's deposit is reserved as though they registered the maximum-sized code. - // Consequently, they can upgrade their code to the maximum size at any point without - // additional cost. 
- let validation_code_deposit = - max_code_size() as BalanceOf * ::DataDepositPerByte::get(); - let head_deposit = 32 * ::DataDepositPerByte::get(); - assert_eq!( - Balances::reserved_balance(&1), - ::ParaDeposit::get() + head_deposit + validation_code_deposit - ); - }); -} - -#[test] -fn schedule_code_upgrade_validates_code() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - - let new_code = test_validation_code(0); - assert_noop!( - mock::Registrar::schedule_code_upgrade( - RuntimeOrigin::signed(1), - para_id, - new_code.clone(), - ), - paras::Error::::InvalidCode - ); - - let new_code = test_validation_code(max_code_size() as usize + 1); - assert_noop!( - mock::Registrar::schedule_code_upgrade( - RuntimeOrigin::signed(1), - para_id, - new_code.clone(), - ), - paras::Error::::InvalidCode - ); - }); -} - -#[test] -fn register_handles_basic_errors() { - new_test_ext().execute_with(|| { - let para_id = LOWEST_PUBLIC_ID; - - assert_noop!( - mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotReserved - ); - - // Successfully register para - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - - assert_noop!( - mock::Registrar::register( - RuntimeOrigin::signed(2), - para_id, - 
test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotOwner - ); - - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - )); - // Can skip pre-check and deregister para which's still onboarding. - run_to_session(2); - - assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id)); - - // Can't do it again - assert_noop!( - mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotReserved - ); - - // Head Size Check - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2))); - assert_noop!( - mock::Registrar::register( - RuntimeOrigin::signed(2), - para_id + 1, - test_genesis_head((max_head_size() + 1) as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::HeadDataTooLarge - ); - - // Code Size Check - assert_noop!( - mock::Registrar::register( - RuntimeOrigin::signed(2), - para_id + 1, - test_genesis_head(max_head_size() as usize), - test_validation_code((max_code_size() + 1) as usize), - ), - Error::::CodeTooLarge - ); - - // Needs enough funds for deposit - assert_noop!( - mock::Registrar::reserve(RuntimeOrigin::signed(1337)), - BalancesError::::InsufficientBalance - ); - }); -} - -#[test] -fn deregister_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, 
START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id,)); - run_to_session(START_SESSION_INDEX + 4); - assert!(paras::Pallet::::lifecycle(para_id).is_none()); - assert_eq!(Balances::reserved_balance(&1), 0); - }); -} - -#[test] -fn deregister_handles_basic_errors() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - // Owner check - assert_noop!(mock::Registrar::deregister(RuntimeOrigin::signed(2), para_id,), BadOrigin); - assert_ok!(mock::Registrar::make_parachain(para_id)); - run_to_session(START_SESSION_INDEX + 4); - // Cant directly deregister parachain - assert_noop!( - mock::Registrar::deregister(RuntimeOrigin::root(), para_id,), - Error::::NotParathread - ); - }); -} - -#[test] -fn swap_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - // Successfully register first two parachains - let para_1 = LOWEST_PUBLIC_ID; - let para_2 = LOWEST_PUBLIC_ID + 1; - - let validation_code = test_validation_code(max_code_size() as usize); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_1, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - 
assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(2), - para_2, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - - // Upgrade para 1 into a parachain - assert_ok!(mock::Registrar::make_parachain(para_1)); - - // Set some mock swap data. - let mut swap_data = SwapData::get(); - swap_data.insert(para_1, 69); - swap_data.insert(para_2, 1337); - SwapData::set(swap_data); - - run_to_session(START_SESSION_INDEX + 4); - - // Roles are as we expect - assert!(Parachains::is_parachain(para_1)); - assert!(!Parachains::is_parathread(para_1)); - assert!(!Parachains::is_parachain(para_2)); - assert!(Parachains::is_parathread(para_2)); - - // Both paras initiate a swap - // Swap between parachain and parathread - assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_2,)); - assert_ok!(mock::Registrar::swap(para_origin(para_2), para_2, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_2, - other_id: para_1, - })); - - run_to_session(START_SESSION_INDEX + 6); - - // Roles are swapped - assert!(!Parachains::is_parachain(para_1)); - assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parachain(para_2)); - assert!(!Parachains::is_parathread(para_2)); - - // Data is swapped - assert_eq!(SwapData::get().get(¶_1).unwrap(), &1337); - assert_eq!(SwapData::get().get(¶_2).unwrap(), &69); - - // Both paras initiate a swap - // Swap between parathread and parachain - assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_2,)); - assert_ok!(mock::Registrar::swap(para_origin(para_2), para_2, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_2, - other_id: para_1, - })); - - // Data is swapped - 
assert_eq!(SwapData::get().get(¶_1).unwrap(), &69); - assert_eq!(SwapData::get().get(¶_2).unwrap(), &1337); - - // Parachain to parachain swap - let para_3 = LOWEST_PUBLIC_ID + 2; - let validation_code = test_validation_code(max_code_size() as usize); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(3))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(3), - para_3, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX + 6); - - run_to_session(START_SESSION_INDEX + 8); - - // Upgrade para 3 into a parachain - assert_ok!(mock::Registrar::make_parachain(para_3)); - - // Set some mock swap data. - let mut swap_data = SwapData::get(); - swap_data.insert(para_3, 777); - SwapData::set(swap_data); - - run_to_session(START_SESSION_INDEX + 10); - - // Both are parachains - assert!(Parachains::is_parachain(para_3)); - assert!(!Parachains::is_parathread(para_3)); - assert!(Parachains::is_parachain(para_1)); - assert!(!Parachains::is_parathread(para_1)); - - // Both paras initiate a swap - // Swap between parachain and parachain - assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_3,)); - assert_ok!(mock::Registrar::swap(para_origin(para_3), para_3, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_3, - other_id: para_1, - })); - - // Data is swapped - assert_eq!(SwapData::get().get(¶_3).unwrap(), &69); - assert_eq!(SwapData::get().get(¶_1).unwrap(), &777); - }); -} - -#[test] -fn para_lock_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - let para_id = LOWEST_PUBLIC_ID; - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_id, - vec![1; 3].into(), - test_validation_code(32) - )); - - assert_noop!(mock::Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin); 
- - // Once they produces new block, we lock them in. - mock::Registrar::on_new_head(para_id, &Default::default()); - - // Owner cannot pass origin check when checking lock - assert_noop!( - mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id), - Error::::ParaLocked, - ); - // Owner cannot remove lock. - assert_noop!(mock::Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin); - // Para can. - assert_ok!(mock::Registrar::remove_lock(para_origin(para_id), para_id)); - // Owner can pass origin check again - assert_ok!(mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); - - // Won't lock again after it is unlocked - mock::Registrar::on_new_head(para_id, &Default::default()); - - assert_ok!(mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); - }); -} - -#[test] -fn swap_handles_bad_states() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_1 = LOWEST_PUBLIC_ID; - let para_2 = LOWEST_PUBLIC_ID + 1; - - // paras are not yet registered - assert!(!Parachains::is_parathread(para_1)); - assert!(!Parachains::is_parathread(para_2)); - - // Cannot even start a swap - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2), - Error::::NotRegistered - ); - - // We register Paras 1 and 2 - let validation_code = test_validation_code(32); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2))); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(1), - para_1, - test_genesis_head(32), - validation_code.clone(), - )); - assert_ok!(mock::Registrar::register( - RuntimeOrigin::signed(2), - para_2, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - // Cannot swap - 
assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 2); - - // They are now parathreads (on-demand parachains). - assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parathread(para_2)); - - // Cannot swap - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - // Some other external process will elevate one on-demand - // parachain to a lease holding parachain - assert_ok!(mock::Registrar::make_parachain(para_1)); - - // Cannot swap - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 3); - - // Cannot swap - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 4); - - // It is now a lease holding parachain. - assert!(Parachains::is_parachain(para_1)); - assert!(Parachains::is_parathread(para_2)); - - // Swap works here. - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1)); - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) - ))); - - run_to_session(START_SESSION_INDEX + 5); - - // Cannot swap - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 6); - - // Swap worked! 
- assert!(Parachains::is_parachain(para_2)); - assert!(Parachains::is_parathread(para_1)); - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) - ))); - - // Something starts to downgrade a para - assert_ok!(mock::Registrar::make_parathread(para_2)); - - run_to_session(START_SESSION_INDEX + 7); - - // Cannot swap - assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 8); - - assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parathread(para_2)); - }); -} diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index bd5984b3b63e..af93c70b4783 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -24,7 +24,7 @@ pub use pallet::*; use polkadot_primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, dmp, hrmp, - paras::{self, AssignCoretime, ParaGenesisArgs, ParaKind}, + paras::{self, AssignCoretime, ParaGenesisArgs}, ParaLifecycle, }; @@ -48,8 +48,6 @@ pub mod pallet { /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a /// downward message. ExceedsMaxMessageSize, - /// A DMP message couldn't be sent because the destination is unreachable. - Unroutable, /// Could not schedule para cleanup. CouldntCleanup, /// Not a parathread (on-demand parachain). 
@@ -82,15 +80,10 @@ pub mod pallet { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; - - let assign_coretime = genesis.para_kind == ParaKind::Parachain; - polkadot_runtime_parachains::schedule_para_initialize::(id, genesis) .map_err(|_| Error::::ParaAlreadyExists)?; - if assign_coretime { - T::AssignCoretime::assign_coretime(id)?; - } + T::AssignCoretime::assign_coretime(id)?; Ok(()) } @@ -159,7 +152,6 @@ pub mod pallet { { dmp::QueueDownwardMessageError::ExceedsMaxMessageSize => Error::::ExceedsMaxMessageSize.into(), - dmp::QueueDownwardMessageError::Unroutable => Error::::Unroutable.into(), }) } diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs new file mode 100644 index 000000000000..cec92540654c --- /dev/null +++ b/polkadot/runtime/common/src/purchase.rs @@ -0,0 +1,1178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Pallet to process purchase of DOTs. 
+ +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, EnsureOrigin, ExistenceRequirement, Get, VestingSchedule}, +}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use scale_info::TypeInfo; +use sp_core::sr25519; +use sp_runtime::{ + traits::{CheckedAdd, Saturating, Verify, Zero}, + AnySignature, DispatchError, DispatchResult, Permill, RuntimeDebug, +}; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// The kind of statement an account needs to make for a claim to be valid. +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub enum AccountValidity { + /// Account is not valid. + Invalid, + /// Account has initiated the account creation process. + Initiated, + /// Account is pending validation. + Pending, + /// Account is valid with a low contribution amount. + ValidLow, + /// Account is valid with a high contribution amount. + ValidHigh, + /// Account has completed the purchase process. + Completed, +} + +impl Default for AccountValidity { + fn default() -> Self { + AccountValidity::Invalid + } +} + +impl AccountValidity { + fn is_valid(&self) -> bool { + match self { + Self::Invalid => false, + Self::Initiated => false, + Self::Pending => false, + Self::ValidLow => true, + Self::ValidHigh => true, + Self::Completed => false, + } + } +} + +/// All information about an account regarding the purchase of DOTs. +#[derive(Encode, Decode, Default, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct AccountStatus { + /// The current validity status of the user. Will denote if the user has passed KYC, + /// how much they are able to purchase, and when their purchase process has completed. + validity: AccountValidity, + /// The amount of free DOTs they have purchased. + free_balance: Balance, + /// The amount of locked DOTs they have purchased. 
+ locked_balance: Balance, + /// Their sr25519/ed25519 signature verifying they have signed our required statement. + signature: Vec, + /// The percentage of VAT the purchaser is responsible for. This is already factored into + /// account balance. + vat: Permill, +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Balances Pallet + type Currency: Currency; + + /// Vesting Pallet + type VestingSchedule: VestingSchedule< + Self::AccountId, + Moment = BlockNumberFor, + Currency = Self::Currency, + >; + + /// The origin allowed to set account status. + type ValidityOrigin: EnsureOrigin; + + /// The origin allowed to make configurations to the pallet. + type ConfigurationOrigin: EnsureOrigin; + + /// The maximum statement length for the statement users to sign when creating an account. + #[pallet::constant] + type MaxStatementLength: Get; + + /// The amount of purchased locked DOTs that we will unlock for basic actions on the chain. + #[pallet::constant] + type UnlockedProportion: Get; + + /// The maximum amount of locked DOTs that we will unlock. + #[pallet::constant] + type MaxUnlocked: Get>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A new account was created. + AccountCreated { who: T::AccountId }, + /// Someone's account validity was updated. + ValidityUpdated { who: T::AccountId, validity: AccountValidity }, + /// Someone's purchase balance was updated. + BalanceUpdated { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, + /// A payout was made to a purchaser. + PaymentComplete { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, + /// A new payment account was set. 
+ PaymentAccountSet { who: T::AccountId }, + /// A new statement was set. + StatementUpdated, + /// A new statement was set. `[block_number]` + UnlockBlockUpdated { block_number: BlockNumberFor }, + } + + #[pallet::error] + pub enum Error { + /// Account is not currently valid to use. + InvalidAccount, + /// Account used in the purchase already exists. + ExistingAccount, + /// Provided signature is invalid + InvalidSignature, + /// Account has already completed the purchase process. + AlreadyCompleted, + /// An overflow occurred when doing calculations. + Overflow, + /// The statement is too long to be stored on chain. + InvalidStatement, + /// The unlock block is in the past! + InvalidUnlockBlock, + /// Vesting schedule already exists for this account. + VestingScheduleExists, + } + + // A map of all participants in the DOT purchase process. + #[pallet::storage] + pub(super) type Accounts = + StorageMap<_, Blake2_128Concat, T::AccountId, AccountStatus>, ValueQuery>; + + // The account that will be used to payout participants of the DOT purchase process. + #[pallet::storage] + pub(super) type PaymentAccount = StorageValue<_, T::AccountId, OptionQuery>; + + // The statement purchasers will need to sign to participate. + #[pallet::storage] + pub(super) type Statement = StorageValue<_, Vec, ValueQuery>; + + // The block where all locked dots will unlock. + #[pallet::storage] + pub(super) type UnlockBlock = StorageValue<_, BlockNumberFor, ValueQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Create a new account. Proof of existence through a valid signed message. + /// + /// We check that the account does not exist at this stage. + /// + /// Origin must match the `ValidityOrigin`. 
+ #[pallet::call_index(0)] + #[pallet::weight(Weight::from_parts(200_000_000, 0) + T::DbWeight::get().reads_writes(4, 1))] + pub fn create_account( + origin: OriginFor, + who: T::AccountId, + signature: Vec, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + // Account is already being tracked by the pallet. + ensure!(!Accounts::::contains_key(&who), Error::::ExistingAccount); + // Account should not have a vesting schedule. + ensure!( + T::VestingSchedule::vesting_balance(&who).is_none(), + Error::::VestingScheduleExists + ); + + // Verify the signature provided is valid for the statement. + Self::verify_signature(&who, &signature)?; + + // Create a new pending account. + let status = AccountStatus { + validity: AccountValidity::Initiated, + signature, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + vat: Permill::zero(), + }; + Accounts::::insert(&who, status); + Self::deposit_event(Event::::AccountCreated { who }); + Ok(()) + } + + /// Update the validity status of an existing account. If set to completed, the account + /// will no longer be able to continue through the crowdfund process. + /// + /// We check that the account exists at this stage, but has not completed the process. + /// + /// Origin must match the `ValidityOrigin`. + #[pallet::call_index(1)] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn update_validity_status( + origin: OriginFor, + who: T::AccountId, + validity: AccountValidity, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + ensure!(Accounts::::contains_key(&who), Error::::InvalidAccount); + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + ensure!( + status.validity != AccountValidity::Completed, + Error::::AlreadyCompleted + ); + status.validity = validity; + Ok(()) + }, + )?; + Self::deposit_event(Event::::ValidityUpdated { who, validity }); + Ok(()) + } + + /// Update the balance of a valid account. 
+ /// + /// We check that the account is valid for a balance transfer at this point. + /// + /// Origin must match the `ValidityOrigin`. + #[pallet::call_index(2)] + #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] + pub fn update_balance( + origin: OriginFor, + who: T::AccountId, + free_balance: BalanceOf, + locked_balance: BalanceOf, + vat: Permill, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + // Account has a valid status (not Invalid, Pending, or Completed)... + ensure!(status.validity.is_valid(), Error::::InvalidAccount); + + free_balance.checked_add(&locked_balance).ok_or(Error::::Overflow)?; + status.free_balance = free_balance; + status.locked_balance = locked_balance; + status.vat = vat; + Ok(()) + }, + )?; + Self::deposit_event(Event::::BalanceUpdated { + who, + free: free_balance, + locked: locked_balance, + }); + Ok(()) + } + + /// Pay the user and complete the purchase process. + /// + /// We reverify all assumptions about the state of an account, and complete the process. + /// + /// Origin must match the configured `PaymentAccount` (if it is not configured then this + /// will always fail with `BadOrigin`). + #[pallet::call_index(3)] + #[pallet::weight(T::DbWeight::get().reads_writes(4, 2))] + pub fn payout(origin: OriginFor, who: T::AccountId) -> DispatchResult { + // Payments must be made directly by the `PaymentAccount`. + let payment_account = ensure_signed(origin)?; + let test_against = PaymentAccount::::get().ok_or(DispatchError::BadOrigin)?; + ensure!(payment_account == test_against, DispatchError::BadOrigin); + + // Account should not have a vesting schedule. + ensure!( + T::VestingSchedule::vesting_balance(&who).is_none(), + Error::::VestingScheduleExists + ); + + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + // Account has a valid status (not Invalid, Pending, or Completed)... 
+ ensure!(status.validity.is_valid(), Error::::InvalidAccount); + + // Transfer funds from the payment account into the purchasing user. + let total_balance = status + .free_balance + .checked_add(&status.locked_balance) + .ok_or(Error::::Overflow)?; + T::Currency::transfer( + &payment_account, + &who, + total_balance, + ExistenceRequirement::AllowDeath, + )?; + + if !status.locked_balance.is_zero() { + let unlock_block = UnlockBlock::::get(); + // We allow some configurable portion of the purchased locked DOTs to be + // unlocked for basic usage. + let unlocked = (T::UnlockedProportion::get() * status.locked_balance) + .min(T::MaxUnlocked::get()); + let locked = status.locked_balance.saturating_sub(unlocked); + // We checked that this account has no existing vesting schedule. So this + // function should never fail, however if it does, not much we can do about + // it at this point. + let _ = T::VestingSchedule::add_vesting_schedule( + // Apply vesting schedule to this user + &who, + // For this much amount + locked, + // Unlocking the full amount after one block + locked, + // When everything unlocks + unlock_block, + ); + } + + // Setting the user account to `Completed` ends the purchase process for this + // user. + status.validity = AccountValidity::Completed; + Self::deposit_event(Event::::PaymentComplete { + who: who.clone(), + free: status.free_balance, + locked: status.locked_balance, + }); + Ok(()) + }, + )?; + Ok(()) + } + + /* Configuration Operations */ + + /// Set the account that will be used to payout users in the DOT purchase process. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(4)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_payment_account(origin: OriginFor, who: T::AccountId) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + // Possibly this is worse than having the caller account be the payment account? 
+ PaymentAccount::::put(who.clone()); + Self::deposit_event(Event::::PaymentAccountSet { who }); + Ok(()) + } + + /// Set the statement that must be signed for a user to participate on the DOT sale. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(5)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_statement(origin: OriginFor, statement: Vec) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + ensure!( + (statement.len() as u32) < T::MaxStatementLength::get(), + Error::::InvalidStatement + ); + // Possibly this is worse than having the caller account be the payment account? + Statement::::set(statement); + Self::deposit_event(Event::::StatementUpdated); + Ok(()) + } + + /// Set the block where locked DOTs will become unlocked. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(6)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_unlock_block( + origin: OriginFor, + unlock_block: BlockNumberFor, + ) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + ensure!( + unlock_block > frame_system::Pallet::::block_number(), + Error::::InvalidUnlockBlock + ); + // Possibly this is worse than having the caller account be the payment account? + UnlockBlock::::set(unlock_block); + Self::deposit_event(Event::::UnlockBlockUpdated { block_number: unlock_block }); + Ok(()) + } + } +} + +impl Pallet { + fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { + // sr25519 always expects a 64 byte signature. + let signature: AnySignature = sr25519::Signature::try_from(signature) + .map_err(|_| Error::::InvalidSignature)? + .into(); + + // In Polkadot, the AccountId is always the same as the 32 byte public key. + let account_bytes: [u8; 32] = account_to_bytes(who)?; + let public_key = sr25519::Public::from_raw(account_bytes); + + let message = Statement::::get(); + + // Check if everything is good or not. 
+ match signature.verify(message.as_slice(), &public_key) { + true => Ok(()), + false => Err(Error::::InvalidSignature)?, + } + } +} + +// This function converts a 32 byte AccountId to its byte-array equivalent form. +fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], DispatchError> +where + AccountId: Encode, +{ + let account_vec = account.encode(); + ensure!(account_vec.len() == 32, "AccountId must be 32 bytes."); + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(&account_vec); + Ok(bytes) +} + +/// WARNING: Executing this function will clear all storage used by this pallet. +/// Be sure this is what you want... +pub fn remove_pallet() -> frame_support::weights::Weight +where + T: frame_system::Config, +{ + #[allow(deprecated)] + use frame_support::migration::remove_storage_prefix; + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"Accounts", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"PaymentAccount", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"Statement", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"UnlockBlock", b""); + + ::BlockWeights::get().max_block +} + +#[cfg(test)] +mod tests { + use super::*; + + use sp_core::{crypto::AccountId32, H256}; + use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
+ use crate::purchase; + use frame_support::{ + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{Currency, WithdrawReasons}, + }; + use sp_runtime::{ + traits::{BlakeTwo256, Dispatchable, Identity, IdentityLookup}, + ArithmeticError, BuildStorage, + DispatchError::BadOrigin, + }; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Vesting: pallet_vesting, + Purchase: purchase, + } + ); + + type AccountId = AccountId32; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Test { + type AccountStore = System; + } + + parameter_types! 
{ + pub const MinVestedTransfer: u64 = 1; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); + } + + impl pallet_vesting::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; + const MAX_VESTING_SCHEDULES: u32 = 28; + } + + parameter_types! { + pub const MaxStatementLength: u32 = 1_000; + pub const UnlockedProportion: Permill = Permill::from_percent(10); + pub const MaxUnlocked: u64 = 10; + } + + ord_parameter_types! { + pub const ValidityOrigin: AccountId = AccountId32::from([0u8; 32]); + pub const PaymentOrigin: AccountId = AccountId32::from([1u8; 32]); + pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); + } + + impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VestingSchedule = Vesting; + type ValidityOrigin = frame_system::EnsureSignedBy; + type ConfigurationOrigin = frame_system::EnsureSignedBy; + type MaxStatementLength = MaxStatementLength; + type UnlockedProportion = UnlockedProportion; + type MaxUnlocked = MaxUnlocked; + } + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. It also executes our `setup` function which sets up this pallet for use. 
+ pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| setup()); + ext + } + + fn setup() { + let statement = b"Hello, World".to_vec(); + let unlock_block = 100; + Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), statement).unwrap(); + Purchase::set_unlock_block(RuntimeOrigin::signed(configuration_origin()), unlock_block) + .unwrap(); + Purchase::set_payment_account( + RuntimeOrigin::signed(configuration_origin()), + payment_account(), + ) + .unwrap(); + Balances::make_free_balance_be(&payment_account(), 100_000); + } + + fn alice() -> AccountId { + Sr25519Keyring::Alice.to_account_id() + } + + fn alice_ed25519() -> AccountId { + Ed25519Keyring::Alice.to_account_id() + } + + fn bob() -> AccountId { + Sr25519Keyring::Bob.to_account_id() + } + + fn alice_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" + hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") + } + + fn bob_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Bob" + hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") + } + + fn alice_signature_ed25519() -> [u8; 64] { + // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" + hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") + } + + fn validity_origin() -> AccountId { + ValidityOrigin::get() + } + + fn configuration_origin() -> 
AccountId { + ConfigurationOrigin::get() + } + + fn payment_account() -> AccountId { + [42u8; 32].into() + } + + #[test] + fn set_statement_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let statement = b"Test Set Statement".to_vec(); + // Invalid origin + assert_noop!( + Purchase::set_statement(RuntimeOrigin::signed(alice()), statement.clone()), + BadOrigin, + ); + // Too Long + let long_statement = [0u8; 10_000].to_vec(); + assert_noop!( + Purchase::set_statement( + RuntimeOrigin::signed(configuration_origin()), + long_statement + ), + Error::::InvalidStatement, + ); + // Just right... + assert_ok!(Purchase::set_statement( + RuntimeOrigin::signed(configuration_origin()), + statement.clone() + )); + assert_eq!(Statement::::get(), statement); + }); + } + + #[test] + fn set_unlock_block_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let unlock_block = 69; + // Invalid origin + assert_noop!( + Purchase::set_unlock_block(RuntimeOrigin::signed(alice()), unlock_block), + BadOrigin, + ); + // Block Number in Past + let bad_unlock_block = 50; + System::set_block_number(bad_unlock_block); + assert_noop!( + Purchase::set_unlock_block( + RuntimeOrigin::signed(configuration_origin()), + bad_unlock_block + ), + Error::::InvalidUnlockBlock, + ); + // Just right... + assert_ok!(Purchase::set_unlock_block( + RuntimeOrigin::signed(configuration_origin()), + unlock_block + )); + assert_eq!(UnlockBlock::::get(), unlock_block); + }); + } + + #[test] + fn set_payment_account_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let payment_account: AccountId = [69u8; 32].into(); + // Invalid Origin + assert_noop!( + Purchase::set_payment_account( + RuntimeOrigin::signed(alice()), + payment_account.clone() + ), + BadOrigin, + ); + // Just right... 
+ assert_ok!(Purchase::set_payment_account( + RuntimeOrigin::signed(configuration_origin()), + payment_account.clone() + )); + assert_eq!(PaymentAccount::::get(), Some(payment_account)); + }); + } + + #[test] + fn signature_verification_works() { + new_test_ext().execute_with(|| { + assert_ok!(Purchase::verify_signature(&alice(), &alice_signature())); + assert_ok!(Purchase::verify_signature(&alice_ed25519(), &alice_signature_ed25519())); + assert_ok!(Purchase::verify_signature(&bob(), &bob_signature())); + + // Mixing and matching fails + assert_noop!( + Purchase::verify_signature(&alice(), &bob_signature()), + Error::::InvalidSignature + ); + assert_noop!( + Purchase::verify_signature(&bob(), &alice_signature()), + Error::::InvalidSignature + ); + }); + } + + #[test] + fn account_creation_works() { + new_test_ext().execute_with(|| { + assert!(!Accounts::::contains_key(alice())); + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Initiated, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn account_creation_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(alice()), + alice(), + alice_signature().to_vec() + ), + BadOrigin, + ); + + // Wrong Account/Signature + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + bob_signature().to_vec() + ), + Error::::InvalidSignature, + ); + + // Account with vesting + Balances::make_free_balance_be(&alice(), 100); + assert_ok!(::VestingSchedule::add_vesting_schedule( + &alice(), + 100, + 1, + 50 + )); + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + 
alice_signature().to_vec() + ), + Error::::VestingScheduleExists, + ); + + // Duplicate Purchasing Account + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + )); + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + ), + Error::::ExistingAccount, + ); + }); + } + + #[test] + fn update_validity_status_works() { + new_test_ext().execute_with(|| { + // Alice account is created. + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + // She submits KYC, and we update the status to `Pending`. + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + )); + // KYC comes back negative, so we mark the account invalid. + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Invalid, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Invalid, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + // She fixes it, we mark her account valid. 
+ assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn update_validity_status_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(alice()), + alice(), + AccountValidity::Pending, + ), + BadOrigin + ); + // Inactive Account + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), + Error::::InvalidAccount + ); + // Already Completed + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Completed, + )); + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), + Error::::AlreadyCompleted + ); + }); + } + + #[test] + fn update_balance_works() { + new_test_ext().execute_with(|| { + // Alice account is created + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + )); + // And approved for basic contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // We set a balance on the user based on the payment they made. 50 locked, 50 free. 
+ assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::from_rational(77u32, 1000u32), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::from_parts(77000), + } + ); + // We can update the balance based on new information. + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 25, + 50, + Permill::zero(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 25, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn update_balance_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(alice()), + alice(), + 50, + 50, + Permill::zero(), + ), + BadOrigin + ); + // Inactive Account + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + ), + Error::::InvalidAccount + ); + // Overflow + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + u64::MAX, + u64::MAX, + Permill::zero(), + ), + Error::::InvalidAccount + ); + }); + } + + #[test] + fn payout_works() { + new_test_ext().execute_with(|| { + // Alice and Bob accounts are created + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + )); + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + )); + // Alice is approved for basic contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // Bob is 
approved for high contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + bob(), + AccountValidity::ValidHigh, + )); + // We set a balance on the users based on the payment they made. 50 locked, 50 free. + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + )); + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + bob(), + 100, + 150, + Permill::zero(), + )); + // Now we call payout for Alice and Bob. + assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),)); + assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),)); + // Payment is made. + assert_eq!(::Currency::free_balance(&payment_account()), 99_650); + assert_eq!(::Currency::free_balance(&alice()), 100); + // 10% of the 50 units is unlocked automatically for Alice + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::Currency::free_balance(&bob()), 250); + // A max of 10 units is unlocked automatically for Bob + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + // Status is completed. 
+ assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + assert_eq!( + Accounts::::get(bob()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 100, + locked_balance: 150, + signature: bob_signature().to_vec(), + vat: Permill::zero(), + } + ); + // Vesting lock is removed in whole on block 101 (100 blocks after block 1) + System::set_block_number(100); + let vest_call = RuntimeCall::Vesting(pallet_vesting::Call::::vest {}); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + System::set_block_number(101); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); + }); + } + + #[test] + fn payout_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!(Purchase::payout(RuntimeOrigin::signed(alice()), alice(),), BadOrigin); + // Account with Existing Vesting Schedule + Balances::make_free_balance_be(&bob(), 100); + assert_ok!( + ::VestingSchedule::add_vesting_schedule(&bob(), 100, 1, 50,) + ); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),), + Error::::VestingScheduleExists + ); + // Invalid Account (never created) + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), + Error::::InvalidAccount + ); + // Invalid Account (created, but not valid) + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), 
+ alice_signature().to_vec() + )); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), + Error::::InvalidAccount + ); + // Not enough funds in payment account + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidHigh, + )); + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 100_000, + 100_000, + Permill::zero(), + )); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice()), + ArithmeticError::Underflow + ); + }); + } + + #[test] + fn remove_pallet_works() { + new_test_ext().execute_with(|| { + let account_status = AccountStatus { + validity: AccountValidity::Completed, + free_balance: 1234, + locked_balance: 4321, + signature: b"my signature".to_vec(), + vat: Permill::from_percent(50), + }; + + // Add some storage. + Accounts::::insert(alice(), account_status.clone()); + Accounts::::insert(bob(), account_status); + PaymentAccount::::put(alice()); + Statement::::put(b"hello, world!".to_vec()); + UnlockBlock::::put(4); + + // Verify storage exists. + assert_eq!(Accounts::::iter().count(), 2); + assert!(PaymentAccount::::exists()); + assert!(Statement::::exists()); + assert!(UnlockBlock::::exists()); + + // Remove storage. + remove_pallet::(); + + // Verify storage is gone. + assert_eq!(Accounts::::iter().count(), 0); + assert!(!PaymentAccount::::exists()); + assert!(!Statement::::exists()); + assert!(!UnlockBlock::::exists()); + }); + } +} diff --git a/polkadot/runtime/common/src/purchase/mock.rs b/polkadot/runtime/common/src/purchase/mock.rs deleted file mode 100644 index ec8599f3b792..000000000000 --- a/polkadot/runtime/common/src/purchase/mock.rs +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Mocking utilities for testing in purchase pallet. - -#[cfg(test)] -use super::*; - -use sp_core::{crypto::AccountId32, H256}; -use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; -// The testing primitives are very useful for avoiding having to work with signatures -// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. -use crate::purchase; -use frame_support::{ - derive_impl, ord_parameter_types, parameter_types, - traits::{Currency, WithdrawReasons}, -}; -use sp_runtime::{ - traits::{BlakeTwo256, Identity, IdentityLookup}, - BuildStorage, -}; - -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Vesting: pallet_vesting, - Purchase: purchase, - } -); - -type AccountId = AccountId32; - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = 
PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Test { - type AccountStore = System; -} - -parameter_types! { - pub const MinVestedTransfer: u64 = 1; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); -} - -impl pallet_vesting::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - type BlockNumberProvider = System; - const MAX_VESTING_SCHEDULES: u32 = 28; -} - -parameter_types! { - pub const MaxStatementLength: u32 = 1_000; - pub const UnlockedProportion: Permill = Permill::from_percent(10); - pub const MaxUnlocked: u64 = 10; -} - -ord_parameter_types! { - pub const ValidityOrigin: AccountId = AccountId32::from([0u8; 32]); - pub const PaymentOrigin: AccountId = AccountId32::from([1u8; 32]); - pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); -} - -impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type VestingSchedule = Vesting; - type ValidityOrigin = frame_system::EnsureSignedBy; - type ConfigurationOrigin = frame_system::EnsureSignedBy; - type MaxStatementLength = MaxStatementLength; - type UnlockedProportion = UnlockedProportion; - type MaxUnlocked = MaxUnlocked; -} - -// This function basically just builds a genesis storage key/value store according to -// our desired mockup. It also executes our `setup` function which sets up this pallet for use. 
-pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| setup()); - ext -} - -pub fn setup() { - let statement = b"Hello, World".to_vec(); - let unlock_block = 100; - Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), statement).unwrap(); - Purchase::set_unlock_block(RuntimeOrigin::signed(configuration_origin()), unlock_block) - .unwrap(); - Purchase::set_payment_account(RuntimeOrigin::signed(configuration_origin()), payment_account()) - .unwrap(); - Balances::make_free_balance_be(&payment_account(), 100_000); -} - -pub fn alice() -> AccountId { - Sr25519Keyring::Alice.to_account_id() -} - -pub fn alice_ed25519() -> AccountId { - Ed25519Keyring::Alice.to_account_id() -} - -pub fn bob() -> AccountId { - Sr25519Keyring::Bob.to_account_id() -} - -pub fn alice_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Alice" - hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") -} - -pub fn bob_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Bob" - hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") -} - -pub fn alice_signature_ed25519() -> [u8; 64] { - // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Alice" - hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") -} - -pub fn validity_origin() -> AccountId { - ValidityOrigin::get() -} - -pub fn 
configuration_origin() -> AccountId { - ConfigurationOrigin::get() -} - -pub fn payment_account() -> AccountId { - [42u8; 32].into() -} diff --git a/polkadot/runtime/common/src/purchase/mod.rs b/polkadot/runtime/common/src/purchase/mod.rs deleted file mode 100644 index 71dc5b579670..000000000000 --- a/polkadot/runtime/common/src/purchase/mod.rs +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Pallet to process purchase of DOTs. - -use alloc::vec::Vec; -use codec::{Decode, Encode}; -use frame_support::{ - pallet_prelude::*, - traits::{Currency, EnsureOrigin, ExistenceRequirement, Get, VestingSchedule}, -}; -use frame_system::pallet_prelude::*; -pub use pallet::*; -use scale_info::TypeInfo; -use sp_core::sr25519; -use sp_runtime::{ - traits::{CheckedAdd, Saturating, Verify, Zero}, - AnySignature, DispatchError, DispatchResult, Permill, RuntimeDebug, -}; - -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -/// The kind of statement an account needs to make for a claim to be valid. -#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub enum AccountValidity { - /// Account is not valid. - Invalid, - /// Account has initiated the account creation process. - Initiated, - /// Account is pending validation. 
- Pending, - /// Account is valid with a low contribution amount. - ValidLow, - /// Account is valid with a high contribution amount. - ValidHigh, - /// Account has completed the purchase process. - Completed, -} - -impl Default for AccountValidity { - fn default() -> Self { - AccountValidity::Invalid - } -} - -impl AccountValidity { - fn is_valid(&self) -> bool { - match self { - Self::Invalid => false, - Self::Initiated => false, - Self::Pending => false, - Self::ValidLow => true, - Self::ValidHigh => true, - Self::Completed => false, - } - } -} - -/// All information about an account regarding the purchase of DOTs. -#[derive(Encode, Decode, Default, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct AccountStatus { - /// The current validity status of the user. Will denote if the user has passed KYC, - /// how much they are able to purchase, and when their purchase process has completed. - validity: AccountValidity, - /// The amount of free DOTs they have purchased. - free_balance: Balance, - /// The amount of locked DOTs they have purchased. - locked_balance: Balance, - /// Their sr25519/ed25519 signature verifying they have signed our required statement. - signature: Vec, - /// The percentage of VAT the purchaser is responsible for. This is already factored into - /// account balance. - vat: Permill, -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// Balances Pallet - type Currency: Currency; - - /// Vesting Pallet - type VestingSchedule: VestingSchedule< - Self::AccountId, - Moment = BlockNumberFor, - Currency = Self::Currency, - >; - - /// The origin allowed to set account status. - type ValidityOrigin: EnsureOrigin; - - /// The origin allowed to make configurations to the pallet. 
- type ConfigurationOrigin: EnsureOrigin; - - /// The maximum statement length for the statement users to sign when creating an account. - #[pallet::constant] - type MaxStatementLength: Get; - - /// The amount of purchased locked DOTs that we will unlock for basic actions on the chain. - #[pallet::constant] - type UnlockedProportion: Get; - - /// The maximum amount of locked DOTs that we will unlock. - #[pallet::constant] - type MaxUnlocked: Get>; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A new account was created. - AccountCreated { who: T::AccountId }, - /// Someone's account validity was updated. - ValidityUpdated { who: T::AccountId, validity: AccountValidity }, - /// Someone's purchase balance was updated. - BalanceUpdated { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, - /// A payout was made to a purchaser. - PaymentComplete { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, - /// A new payment account was set. - PaymentAccountSet { who: T::AccountId }, - /// A new statement was set. - StatementUpdated, - /// A new statement was set. `[block_number]` - UnlockBlockUpdated { block_number: BlockNumberFor }, - } - - #[pallet::error] - pub enum Error { - /// Account is not currently valid to use. - InvalidAccount, - /// Account used in the purchase already exists. - ExistingAccount, - /// Provided signature is invalid - InvalidSignature, - /// Account has already completed the purchase process. - AlreadyCompleted, - /// An overflow occurred when doing calculations. - Overflow, - /// The statement is too long to be stored on chain. - InvalidStatement, - /// The unlock block is in the past! - InvalidUnlockBlock, - /// Vesting schedule already exists for this account. - VestingScheduleExists, - } - - // A map of all participants in the DOT purchase process. 
- #[pallet::storage] - pub(super) type Accounts = - StorageMap<_, Blake2_128Concat, T::AccountId, AccountStatus>, ValueQuery>; - - // The account that will be used to payout participants of the DOT purchase process. - #[pallet::storage] - pub(super) type PaymentAccount = StorageValue<_, T::AccountId, OptionQuery>; - - // The statement purchasers will need to sign to participate. - #[pallet::storage] - pub(super) type Statement = StorageValue<_, Vec, ValueQuery>; - - // The block where all locked dots will unlock. - #[pallet::storage] - pub(super) type UnlockBlock = StorageValue<_, BlockNumberFor, ValueQuery>; - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet { - /// Create a new account. Proof of existence through a valid signed message. - /// - /// We check that the account does not exist at this stage. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(0)] - #[pallet::weight(Weight::from_parts(200_000_000, 0) + T::DbWeight::get().reads_writes(4, 1))] - pub fn create_account( - origin: OriginFor, - who: T::AccountId, - signature: Vec, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - // Account is already being tracked by the pallet. - ensure!(!Accounts::::contains_key(&who), Error::::ExistingAccount); - // Account should not have a vesting schedule. - ensure!( - T::VestingSchedule::vesting_balance(&who).is_none(), - Error::::VestingScheduleExists - ); - - // Verify the signature provided is valid for the statement. - Self::verify_signature(&who, &signature)?; - - // Create a new pending account. - let status = AccountStatus { - validity: AccountValidity::Initiated, - signature, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - vat: Permill::zero(), - }; - Accounts::::insert(&who, status); - Self::deposit_event(Event::::AccountCreated { who }); - Ok(()) - } - - /// Update the validity status of an existing account. 
If set to completed, the account - /// will no longer be able to continue through the crowdfund process. - /// - /// We check that the account exists at this stage, but has not completed the process. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(1)] - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn update_validity_status( - origin: OriginFor, - who: T::AccountId, - validity: AccountValidity, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - ensure!(Accounts::::contains_key(&who), Error::::InvalidAccount); - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - ensure!( - status.validity != AccountValidity::Completed, - Error::::AlreadyCompleted - ); - status.validity = validity; - Ok(()) - }, - )?; - Self::deposit_event(Event::::ValidityUpdated { who, validity }); - Ok(()) - } - - /// Update the balance of a valid account. - /// - /// We check that the account is valid for a balance transfer at this point. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(2)] - #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] - pub fn update_balance( - origin: OriginFor, - who: T::AccountId, - free_balance: BalanceOf, - locked_balance: BalanceOf, - vat: Permill, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - // Account has a valid status (not Invalid, Pending, or Completed)... - ensure!(status.validity.is_valid(), Error::::InvalidAccount); - - free_balance.checked_add(&locked_balance).ok_or(Error::::Overflow)?; - status.free_balance = free_balance; - status.locked_balance = locked_balance; - status.vat = vat; - Ok(()) - }, - )?; - Self::deposit_event(Event::::BalanceUpdated { - who, - free: free_balance, - locked: locked_balance, - }); - Ok(()) - } - - /// Pay the user and complete the purchase process. 
- /// - /// We reverify all assumptions about the state of an account, and complete the process. - /// - /// Origin must match the configured `PaymentAccount` (if it is not configured then this - /// will always fail with `BadOrigin`). - #[pallet::call_index(3)] - #[pallet::weight(T::DbWeight::get().reads_writes(4, 2))] - pub fn payout(origin: OriginFor, who: T::AccountId) -> DispatchResult { - // Payments must be made directly by the `PaymentAccount`. - let payment_account = ensure_signed(origin)?; - let test_against = PaymentAccount::::get().ok_or(DispatchError::BadOrigin)?; - ensure!(payment_account == test_against, DispatchError::BadOrigin); - - // Account should not have a vesting schedule. - ensure!( - T::VestingSchedule::vesting_balance(&who).is_none(), - Error::::VestingScheduleExists - ); - - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - // Account has a valid status (not Invalid, Pending, or Completed)... - ensure!(status.validity.is_valid(), Error::::InvalidAccount); - - // Transfer funds from the payment account into the purchasing user. - let total_balance = status - .free_balance - .checked_add(&status.locked_balance) - .ok_or(Error::::Overflow)?; - T::Currency::transfer( - &payment_account, - &who, - total_balance, - ExistenceRequirement::AllowDeath, - )?; - - if !status.locked_balance.is_zero() { - let unlock_block = UnlockBlock::::get(); - // We allow some configurable portion of the purchased locked DOTs to be - // unlocked for basic usage. - let unlocked = (T::UnlockedProportion::get() * status.locked_balance) - .min(T::MaxUnlocked::get()); - let locked = status.locked_balance.saturating_sub(unlocked); - // We checked that this account has no existing vesting schedule. So this - // function should never fail, however if it does, not much we can do about - // it at this point. 
- let _ = T::VestingSchedule::add_vesting_schedule( - // Apply vesting schedule to this user - &who, - // For this much amount - locked, - // Unlocking the full amount after one block - locked, - // When everything unlocks - unlock_block, - ); - } - - // Setting the user account to `Completed` ends the purchase process for this - // user. - status.validity = AccountValidity::Completed; - Self::deposit_event(Event::::PaymentComplete { - who: who.clone(), - free: status.free_balance, - locked: status.locked_balance, - }); - Ok(()) - }, - )?; - Ok(()) - } - - /* Configuration Operations */ - - /// Set the account that will be used to payout users in the DOT purchase process. - /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(4)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_payment_account(origin: OriginFor, who: T::AccountId) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - // Possibly this is worse than having the caller account be the payment account? - PaymentAccount::::put(who.clone()); - Self::deposit_event(Event::::PaymentAccountSet { who }); - Ok(()) - } - - /// Set the statement that must be signed for a user to participate on the DOT sale. - /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(5)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_statement(origin: OriginFor, statement: Vec) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - ensure!( - (statement.len() as u32) < T::MaxStatementLength::get(), - Error::::InvalidStatement - ); - // Possibly this is worse than having the caller account be the payment account? - Statement::::set(statement); - Self::deposit_event(Event::::StatementUpdated); - Ok(()) - } - - /// Set the block where locked DOTs will become unlocked. 
- /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(6)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_unlock_block( - origin: OriginFor, - unlock_block: BlockNumberFor, - ) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - ensure!( - unlock_block > frame_system::Pallet::::block_number(), - Error::::InvalidUnlockBlock - ); - // Possibly this is worse than having the caller account be the payment account? - UnlockBlock::::set(unlock_block); - Self::deposit_event(Event::::UnlockBlockUpdated { block_number: unlock_block }); - Ok(()) - } - } -} - -impl Pallet { - fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { - // sr25519 always expects a 64 byte signature. - let signature: AnySignature = sr25519::Signature::try_from(signature) - .map_err(|_| Error::::InvalidSignature)? - .into(); - - // In Polkadot, the AccountId is always the same as the 32 byte public key. - let account_bytes: [u8; 32] = account_to_bytes(who)?; - let public_key = sr25519::Public::from_raw(account_bytes); - - let message = Statement::::get(); - - // Check if everything is good or not. - match signature.verify(message.as_slice(), &public_key) { - true => Ok(()), - false => Err(Error::::InvalidSignature)?, - } - } -} - -// This function converts a 32 byte AccountId to its byte-array equivalent form. -fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], DispatchError> -where - AccountId: Encode, -{ - let account_vec = account.encode(); - ensure!(account_vec.len() == 32, "AccountId must be 32 bytes."); - let mut bytes = [0u8; 32]; - bytes.copy_from_slice(&account_vec); - Ok(bytes) -} - -/// WARNING: Executing this function will clear all storage used by this pallet. -/// Be sure this is what you want... 
-pub fn remove_pallet() -> frame_support::weights::Weight -where - T: frame_system::Config, -{ - #[allow(deprecated)] - use frame_support::migration::remove_storage_prefix; - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"Accounts", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"PaymentAccount", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"Statement", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"UnlockBlock", b""); - - ::BlockWeights::get().max_block -} - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; diff --git a/polkadot/runtime/common/src/purchase/tests.rs b/polkadot/runtime/common/src/purchase/tests.rs deleted file mode 100644 index 8cf2a124d245..000000000000 --- a/polkadot/runtime/common/src/purchase/tests.rs +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Tests for the purchase pallet. - -#[cfg(test)] -use super::*; - -use sp_core::crypto::AccountId32; -// The testing primitives are very useful for avoiding having to work with signatures -// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
-use frame_support::{assert_noop, assert_ok, traits::Currency}; -use sp_runtime::{traits::Dispatchable, ArithmeticError, DispatchError::BadOrigin}; - -use crate::purchase::mock::*; - -#[test] -fn set_statement_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let statement = b"Test Set Statement".to_vec(); - // Invalid origin - assert_noop!( - Purchase::set_statement(RuntimeOrigin::signed(alice()), statement.clone()), - BadOrigin, - ); - // Too Long - let long_statement = [0u8; 10_000].to_vec(); - assert_noop!( - Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), long_statement), - Error::::InvalidStatement, - ); - // Just right... - assert_ok!(Purchase::set_statement( - RuntimeOrigin::signed(configuration_origin()), - statement.clone() - )); - assert_eq!(Statement::::get(), statement); - }); -} - -#[test] -fn set_unlock_block_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let unlock_block = 69; - // Invalid origin - assert_noop!( - Purchase::set_unlock_block(RuntimeOrigin::signed(alice()), unlock_block), - BadOrigin, - ); - // Block Number in Past - let bad_unlock_block = 50; - System::set_block_number(bad_unlock_block); - assert_noop!( - Purchase::set_unlock_block( - RuntimeOrigin::signed(configuration_origin()), - bad_unlock_block - ), - Error::::InvalidUnlockBlock, - ); - // Just right... - assert_ok!(Purchase::set_unlock_block( - RuntimeOrigin::signed(configuration_origin()), - unlock_block - )); - assert_eq!(UnlockBlock::::get(), unlock_block); - }); -} - -#[test] -fn set_payment_account_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let payment_account: AccountId32 = [69u8; 32].into(); - // Invalid Origin - assert_noop!( - Purchase::set_payment_account(RuntimeOrigin::signed(alice()), payment_account.clone()), - BadOrigin, - ); - // Just right... 
- assert_ok!(Purchase::set_payment_account( - RuntimeOrigin::signed(configuration_origin()), - payment_account.clone() - )); - assert_eq!(PaymentAccount::::get(), Some(payment_account)); - }); -} - -#[test] -fn signature_verification_works() { - new_test_ext().execute_with(|| { - assert_ok!(Purchase::verify_signature(&alice(), &alice_signature())); - assert_ok!(Purchase::verify_signature(&alice_ed25519(), &alice_signature_ed25519())); - assert_ok!(Purchase::verify_signature(&bob(), &bob_signature())); - - // Mixing and matching fails - assert_noop!( - Purchase::verify_signature(&alice(), &bob_signature()), - Error::::InvalidSignature - ); - assert_noop!( - Purchase::verify_signature(&bob(), &alice_signature()), - Error::::InvalidSignature - ); - }); -} - -#[test] -fn account_creation_works() { - new_test_ext().execute_with(|| { - assert!(!Accounts::::contains_key(alice())); - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Initiated, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); -} - -#[test] -fn account_creation_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(alice()), - alice(), - alice_signature().to_vec() - ), - BadOrigin, - ); - - // Wrong Account/Signature - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - bob_signature().to_vec() - ), - Error::::InvalidSignature, - ); - - // Account with vesting - Balances::make_free_balance_be(&alice(), 100); - assert_ok!(::VestingSchedule::add_vesting_schedule(&alice(), 100, 1, 50)); - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - ), - 
Error::::VestingScheduleExists, - ); - - // Duplicate Purchasing Account - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - )); - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - ), - Error::::ExistingAccount, - ); - }); -} - -#[test] -fn update_validity_status_works() { - new_test_ext().execute_with(|| { - // Alice account is created. - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - // She submits KYC, and we update the status to `Pending`. - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - )); - // KYC comes back negative, so we mark the account invalid. - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Invalid, - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Invalid, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - // She fixes it, we mark her account valid. 
- assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); -} - -#[test] -fn update_validity_status_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(alice()), - alice(), - AccountValidity::Pending, - ), - BadOrigin - ); - // Inactive Account - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - ), - Error::::InvalidAccount - ); - // Already Completed - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Completed, - )); - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - ), - Error::::AlreadyCompleted - ); - }); -} - -#[test] -fn update_balance_works() { - new_test_ext().execute_with(|| { - // Alice account is created - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - )); - // And approved for basic contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - // We set a balance on the user based on the payment they made. 50 locked, 50 free. 
- assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::from_rational(77u32, 1000u32), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: 50, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::from_parts(77000), - } - ); - // We can update the balance based on new information. - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 25, - 50, - Permill::zero(), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: 25, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); -} - -#[test] -fn update_balance_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(alice()), - alice(), - 50, - 50, - Permill::zero(), - ), - BadOrigin - ); - // Inactive Account - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::zero(), - ), - Error::::InvalidAccount - ); - // Overflow - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - u64::MAX, - u64::MAX, - Permill::zero(), - ), - Error::::InvalidAccount - ); - }); -} - -#[test] -fn payout_works() { - new_test_ext().execute_with(|| { - // Alice and Bob accounts are created - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - )); - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - )); - // Alice is approved for basic contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - // Bob is approved for 
high contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - bob(), - AccountValidity::ValidHigh, - )); - // We set a balance on the users based on the payment they made. 50 locked, 50 free. - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::zero(), - )); - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - bob(), - 100, - 150, - Permill::zero(), - )); - // Now we call payout for Alice and Bob. - assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),)); - assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),)); - // Payment is made. - assert_eq!(::Currency::free_balance(&payment_account()), 99_650); - assert_eq!(::Currency::free_balance(&alice()), 100); - // 10% of the 50 units is unlocked automatically for Alice - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::Currency::free_balance(&bob()), 250); - // A max of 10 units is unlocked automatically for Bob - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); - // Status is completed. 
- assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Completed, - free_balance: 50, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - assert_eq!( - Accounts::::get(bob()), - AccountStatus { - validity: AccountValidity::Completed, - free_balance: 100, - locked_balance: 150, - signature: bob_signature().to_vec(), - vat: Permill::zero(), - } - ); - // Vesting lock is removed in whole on block 101 (100 blocks after block 1) - System::set_block_number(100); - let vest_call = RuntimeCall::Vesting(pallet_vesting::Call::::vest {}); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); - System::set_block_number(101); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); - }); -} - -#[test] -fn payout_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!(Purchase::payout(RuntimeOrigin::signed(alice()), alice(),), BadOrigin); - // Account with Existing Vesting Schedule - Balances::make_free_balance_be(&bob(), 100); - assert_ok!(::VestingSchedule::add_vesting_schedule(&bob(), 100, 1, 50,)); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),), - Error::::VestingScheduleExists - ); - // Invalid Account (never created) - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), - Error::::InvalidAccount - ); - // Invalid Account (created, but not valid) - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - 
alice_signature().to_vec() - )); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), - Error::::InvalidAccount - ); - // Not enough funds in payment account - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidHigh, - )); - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 100_000, - 100_000, - Permill::zero(), - )); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice()), - ArithmeticError::Underflow - ); - }); -} - -#[test] -fn remove_pallet_works() { - new_test_ext().execute_with(|| { - let account_status = AccountStatus { - validity: AccountValidity::Completed, - free_balance: 1234, - locked_balance: 4321, - signature: b"my signature".to_vec(), - vat: Permill::from_percent(50), - }; - - // Add some storage. - Accounts::::insert(alice(), account_status.clone()); - Accounts::::insert(bob(), account_status); - PaymentAccount::::put(alice()); - Statement::::put(b"hello, world!".to_vec()); - UnlockBlock::::put(4); - - // Verify storage exists. - assert_eq!(Accounts::::iter().count(), 2); - assert!(PaymentAccount::::exists()); - assert!(Statement::::exists()); - assert!(UnlockBlock::::exists()); - - // Remove storage. - remove_pallet::(); - - // Verify storage is gone. 
- assert_eq!(Accounts::::iter().count(), 0); - assert!(!PaymentAccount::::exists()); - assert!(!Statement::::exists()); - assert!(!UnlockBlock::::exists()); - }); -} diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 32ea4fdd2f27..7ff7f69faf14 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -138,13 +138,6 @@ where .map(|()| hash) .map_err(|_| SendError::Transport(&"Error placing into DMP queue")) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - if let Some((0, [Parachain(id)])) = location.as_ref().map(|l| l.unpack()) { - dmp::Pallet::::make_parachain_reachable(*id); - } - } } impl InspectMessageQueues for ChildParachainRouter { @@ -197,7 +190,7 @@ impl< ExistentialDeposit: Get>, PriceForDelivery: PriceForMessageDelivery, Parachain: Get, - ToParachainHelper: polkadot_runtime_parachains::EnsureForParachain, + ToParachainHelper: EnsureForParachain, > xcm_builder::EnsureDelivery for ToParachainDeliveryHelper< XcmConfig, @@ -226,9 +219,6 @@ impl< return (None, None) } - // allow more initialization for target parachain - ToParachainHelper::ensure(Parachain::get()); - let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for paying and receiving fees @@ -248,6 +238,9 @@ impl< XcmConfig::AssetTransactor::deposit_asset(&fee, &origin_ref, None).unwrap(); } + // allow more initialization for target parachain + ToParachainHelper::ensure(Parachain::get()); + // expected worst case - direct withdraw fees_mode = Some(FeesMode { jit_withdraw: true }); } @@ -255,6 +248,18 @@ impl< } } +/// Ensure more initialization for `ParaId`. (e.g. open HRMP channels, ...) 
+#[cfg(feature = "runtime-benchmarks")] +pub trait EnsureForParachain { + fn ensure(para_id: ParaId); +} +#[cfg(feature = "runtime-benchmarks")] +impl EnsureForParachain for () { + fn ensure(_: ParaId) { + // doing nothing + } +} + #[cfg(test)] mod tests { use super::*; @@ -344,8 +349,6 @@ mod tests { c.max_downward_message_size = u32::MAX; }); - dmp::Pallet::::make_parachain_reachable(5555); - // Check that the good message is validated: assert_ok!(::validate( &mut Some(dest.into()), diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index beb7e3236d5a..3709e1eb697e 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -5,17 +5,15 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] +sp-tracing = { workspace = true } codec = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } polkadot-primitives = { workspace = true } -sp-tracing = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } bs58 = { features = ["alloc"], workspace = true } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 7c00995d2291..a3eec3f9d961 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -5,42 +5,37 @@ description = "Relay Chain runtime code responsible for Parachains." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -bitflags = { workspace = true } +impl-trait-for-tuples = { workspace = true } bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -derive_more = { workspace = true, default-features = true } -impl-trait-for-tuples = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } +derive_more = { workspace = true, default-features = true } +bitflags = { workspace = true } sp-api = { workspace = true } -sp-application-crypto = { optional = true, workspace = true } -sp-arithmetic = { workspace = true } -sp-core = { features = ["serde"], workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } -sp-keystore = { optional = true, workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } -sp-std = { workspace = true, optional = true } +sp-core = { features = ["serde"], workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-application-crypto = { optional = true, workspace = true } sp-tracing = { optional = true, workspace = true } +sp-arithmetic = { workspace = true } +sp-std = { workspace = true, optional = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } -pallet-babe = { workspace = true } pallet-balances = { workspace = true } +pallet-babe = { workspace = true } pallet-broker = { workspace = true } pallet-message-queue = { workspace = true } pallet-mmr = { workspace = true, optional = true } @@ 
-48,33 +43,36 @@ pallet-session = { workspace = true } pallet-staking = { workspace = true } pallet-timestamp = { workspace = true } pallet-vesting = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -polkadot-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } +polkadot-primitives = { workspace = true } -polkadot-core-primitives = { workspace = true } -polkadot-parachain-primitives = { workspace = true } -polkadot-runtime-metrics = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } static_assertions = { optional = true, workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-metrics = { workspace = true } +polkadot-core-primitives = { workspace = true } [dev-dependencies] polkadot-primitives = { workspace = true, features = ["test"] } -assert_matches = { workspace = true } -frame-support-test = { workspace = true } futures = { workspace = true } hex-literal = { workspace = true, default-features = true } -polkadot-primitives-test-helpers = { workspace = true } -rstest = { workspace = true } -sc-keystore = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +frame-support-test = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } sp-tracing = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } thousands = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +serde_json = { workspace = true, default-features = true } [features] default = ["std"] @@ -140,7 +138,6 @@ 
runtime-benchmarks = [ "sp-std", "static_assertions", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support-test/try-runtime", diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index 49e3d8a88c01..6d593f1954ff 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -43,8 +43,6 @@ mod benchmarks { .unwrap(); on_demand::Revenue::::put(rev); - crate::paras::Heads::::insert(ParaId::from(T::BrokerId::get()), vec![1, 2, 3]); - ::Currency::make_free_balance_be( &>::account_id(), minimum_balance * (mhr * (mhr + 1)).into(), diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 5656e92b90be..966b7997a277 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -136,11 +136,6 @@ pub mod pallet { type AssetTransactor: TransactAsset; /// AccountId to Location converter type AccountToLocation: for<'a> TryConvert<&'a Self::AccountId, Location>; - - /// Maximum weight for any XCM transact call that should be executed on the coretime chain. - /// - /// Basically should be `max_weight(set_leases, reserve, notify_core_count)`. 
- type MaxXcmTransactWeight: Get; } #[pallet::event] @@ -338,7 +333,6 @@ impl OnNewSession> for Pallet { fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { Instruction::Transact { origin_kind: OriginKind::Superuser, - fallback_max_weight: Some(T::MaxXcmTransactWeight::get()), call: BrokerRuntimePallets::Broker(call).encode().into(), } } diff --git a/polkadot/runtime/parachains/src/disputes/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/benchmarking.rs index 571c44d1ac24..05f4b3f1ac81 100644 --- a/polkadot/runtime/parachains/src/disputes/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/benchmarking.rs @@ -16,21 +16,15 @@ use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use sp_runtime::traits::One; -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn force_unfreeze() { +benchmarks! { + force_unfreeze { Frozen::::set(Some(One::one())); - - #[extrinsic_call] - _(RawOrigin::Root); - + }: _(RawOrigin::Root) + verify { assert!(Frozen::::get().is_none()) } diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 95dbf2ba42bb..2e09ea667f74 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -355,12 +355,12 @@ impl HandleReports for () { } pub trait WeightInfo { - fn report_dispute_lost_unsigned(validator_count: ValidatorSetCount) -> Weight; + fn report_dispute_lost(validator_count: ValidatorSetCount) -> Weight; } pub struct TestWeightInfo; impl WeightInfo for TestWeightInfo { - fn report_dispute_lost_unsigned(_validator_count: ValidatorSetCount) -> Weight { + fn report_dispute_lost(_validator_count: ValidatorSetCount) -> Weight { Weight::zero() } } @@ -445,7 +445,7 @@ pub mod pallet { #[pallet::call] impl Pallet { #[pallet::call_index(0)] - 
#[pallet::weight(::WeightInfo::report_dispute_lost_unsigned( + #[pallet::weight(::WeightInfo::report_dispute_lost( key_owner_proof.validator_count() ))] pub fn report_dispute_lost_unsigned( diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index bfd46d752438..b53f98caeea3 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -18,7 +18,7 @@ use super::*; use crate::{disputes::SlashingHandler, initializer, shared}; use codec::Decode; -use frame_benchmarking::v2::*; +use frame_benchmarking::{benchmarks, whitelist_account}; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_staking::testing_utils::create_validators; @@ -29,11 +29,6 @@ use sp_session::MembershipProof; // Candidate hash of the disputed candidate. const CANDIDATE_HASH: CandidateHash = CandidateHash(Hash::zero()); -// Simplify getting the value in the benchmark -pub const fn max_validators_for() -> u32 { - <::BenchmarkingConfig as BenchmarkingConfiguration>::MAX_VALIDATORS -} - pub trait Config: pallet_session::Config + pallet_session::historical::Config @@ -111,7 +106,6 @@ where (session_index, key_owner_proof, validator_id) } -/// Submits a single `ForInvalid` dispute. fn setup_dispute(session_index: SessionIndex, validator_id: ValidatorId) -> DisputeProof where T: Config, @@ -131,7 +125,6 @@ where dispute_proof(session_index, validator_id, validator_index) } -/// Creates a `ForInvalid` dispute proof. fn dispute_proof( session_index: SessionIndex, validator_id: ValidatorId, @@ -143,20 +136,27 @@ fn dispute_proof( DisputeProof { time_slot, kind, validator_index, validator_id } } -#[benchmarks(where T: Config)] -mod benchmarks { - use super::*; +benchmarks! 
{ + where_clause { + where T: Config, + } - #[benchmark] - fn report_dispute_lost_unsigned(n: Linear<4, { max_validators_for::() }>) { - let (session_index, key_owner_proof, validator_id) = setup_validator_set::(n); + // in this setup we have a single `ForInvalid` dispute + // submitted for a past session + report_dispute_lost { + let n in 4..<::BenchmarkingConfig as BenchmarkingConfiguration>::MAX_VALIDATORS; - // submit a single `ForInvalid` dispute for a past session. + let origin = RawOrigin::None.into(); + let (session_index, key_owner_proof, validator_id) = setup_validator_set::(n); let dispute_proof = setup_dispute::(session_index, validator_id); - - #[extrinsic_call] - _(RawOrigin::None, Box::new(dispute_proof), key_owner_proof); - + }: { + let result = Pallet::::report_dispute_lost_unsigned( + origin, + Box::new(dispute_proof), + key_owner_proof, + ); + assert!(result.is_ok()); + } verify { let unapplied = >::get(session_index, CANDIDATE_HASH); assert!(unapplied.is_none()); } diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index 3c9cf8004186..03580e11b8e9 100644 --- a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -44,7 +44,7 @@ use crate::{ configuration::{self, HostConfiguration}, - initializer, paras, FeeTracker, + initializer, FeeTracker, }; use alloc::vec::Vec; use core::fmt; @@ -72,15 +72,12 @@ const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0 pub enum QueueDownwardMessageError { /// The message being sent exceeds the configured max message size. ExceedsMaxMessageSize, - /// The destination is unknown. 
- Unroutable, } impl From for SendError { fn from(err: QueueDownwardMessageError) -> Self { match err { QueueDownwardMessageError::ExceedsMaxMessageSize => SendError::ExceedsMaxMessageSize, - QueueDownwardMessageError::Unroutable => SendError::Unroutable, } } } @@ -119,7 +116,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config + paras::Config {} + pub trait Config: frame_system::Config + configuration::Config {} /// The downward messages addressed for a certain para. #[pallet::storage] @@ -203,11 +200,6 @@ impl Pallet { return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) } - // If the head exists, we assume the parachain is legit and exists. - if !paras::Heads::::contains_key(para) { - return Err(QueueDownwardMessageError::Unroutable) - } - Ok(()) } @@ -225,7 +217,14 @@ impl Pallet { msg: DownwardMessage, ) -> Result<(), QueueDownwardMessageError> { let serialized_len = msg.len() as u32; - Self::can_queue_downward_message(config, ¶, &msg)?; + if serialized_len > config.max_downward_message_size { + return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) + } + + // Hard limit on Queue size + if Self::dmq_length(para) > Self::dmq_max_length(config.max_downward_message_size) { + return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) + } let inbound = InboundDownwardMessage { msg, sent_at: frame_system::Pallet::::block_number() }; @@ -337,15 +336,6 @@ impl Pallet { ) -> Vec>> { DownwardMessageQueues::::get(&recipient) } - - /// Make the parachain reachable for downward messages. - /// - /// Only useable in benchmarks or tests. 
- #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] - pub fn make_parachain_reachable(para: impl Into) { - let para = para.into(); - crate::paras::Heads::::insert(para, para.encode()); - } } impl FeeTracker for Pallet { @@ -369,10 +359,3 @@ impl FeeTracker for Pallet { }) } } - -#[cfg(feature = "runtime-benchmarks")] -impl crate::EnsureForParachain for Pallet { - fn ensure(para: ParaId) { - Self::make_parachain_reachable(para); - } -} diff --git a/polkadot/runtime/parachains/src/dmp/tests.rs b/polkadot/runtime/parachains/src/dmp/tests.rs index 617c9488bd2a..de1515958125 100644 --- a/polkadot/runtime/parachains/src/dmp/tests.rs +++ b/polkadot/runtime/parachains/src/dmp/tests.rs @@ -61,12 +61,6 @@ fn queue_downward_message( Dmp::queue_downward_message(&configuration::ActiveConfig::::get(), para_id, msg) } -fn register_paras(paras: &[ParaId]) { - paras.iter().for_each(|p| { - Dmp::make_parachain_reachable(*p); - }); -} - #[test] fn clean_dmp_works() { let a = ParaId::from(1312); @@ -74,8 +68,6 @@ fn clean_dmp_works() { let c = ParaId::from(123); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a, b, c]); - // enqueue downward messages to A, B and C. 
queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(b, vec![4, 5, 6]).unwrap(); @@ -97,8 +89,6 @@ fn dmq_length_and_head_updated_properly() { let b = ParaId::from(228); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a, b]); - assert_eq!(Dmp::dmq_length(a), 0); assert_eq!(Dmp::dmq_length(b), 0); @@ -111,30 +101,11 @@ fn dmq_length_and_head_updated_properly() { }); } -#[test] -fn dmq_fail_if_para_does_not_exist() { - let a = ParaId::from(1312); - - new_test_ext(default_genesis_config()).execute_with(|| { - assert_eq!(Dmp::dmq_length(a), 0); - - assert!(matches!( - queue_downward_message(a, vec![1, 2, 3]), - Err(QueueDownwardMessageError::Unroutable) - )); - - assert_eq!(Dmp::dmq_length(a), 0); - assert!(Dmp::dmq_mqc_head(a).is_zero()); - }); -} - #[test] fn dmp_mqc_head_fixture() { let a = ParaId::from(2000); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a]); - run_to_block(2, None); assert!(Dmp::dmq_mqc_head(a).is_zero()); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -154,8 +125,6 @@ fn check_processed_downward_messages() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a]); - let block_number = System::block_number(); // processed_downward_messages=0 is allowed when the DMQ is empty. 
@@ -181,8 +150,6 @@ fn check_processed_downward_messages_advancement_rule() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a]); - let block_number = System::block_number(); run_to_block(block_number + 1, None); @@ -203,8 +170,6 @@ fn dmq_pruning() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a]); - assert_eq!(Dmp::dmq_length(a), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -229,8 +194,6 @@ fn queue_downward_message_critical() { genesis.configuration.config.max_downward_message_size = 7; new_test_ext(genesis).execute_with(|| { - register_paras(&[a]); - let smol = [0; 3].to_vec(); let big = [0; 8].to_vec(); @@ -252,8 +215,6 @@ fn verify_dmq_mqc_head_is_externally_accessible() { let a = ParaId::from(2020); new_test_ext(default_genesis_config()).execute_with(|| { - register_paras(&[a]); - let head = sp_io::storage::get(&well_known_keys::dmq_mqc_head(a)); assert_eq!(head, None); @@ -274,12 +235,9 @@ fn verify_dmq_mqc_head_is_externally_accessible() { #[test] fn verify_fee_increase_and_decrease() { let a = ParaId::from(123); - let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 16777216; new_test_ext(genesis).execute_with(|| { - register_paras(&[a]); - let initial = InitialFactor::get(); assert_eq!(DeliveryFeeFactor::::get(a), initial); @@ -329,8 +287,6 @@ fn verify_fee_factor_reaches_high_value() { let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 51200; new_test_ext(genesis).execute_with(|| { - register_paras(&[a]); - let max_messages = Dmp::dmq_max_length(ActiveConfig::::get().max_downward_message_size); let mut total_fee_factor = FixedU128::from_float(1.0); diff --git a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs index ab95c5c2366a..1dac3c92cf16 100644 --- 
a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs +++ b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs @@ -14,14 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use bitvec::{bitvec, prelude::Lsb0}; -use frame_benchmarking::v2::*; -use pallet_message_queue as mq; -use polkadot_primitives::{ - vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments, - HrmpChannelId, OutboundHrmpMessage, SessionIndex, -}; - use super::*; use crate::{ builder::generate_validator_pairs, @@ -29,6 +21,13 @@ use crate::{ hrmp::{HrmpChannel, HrmpChannels}, initializer, HeadData, ValidationCode, }; +use bitvec::{bitvec, prelude::Lsb0}; +use frame_benchmarking::benchmarks; +use pallet_message_queue as mq; +use polkadot_primitives::{ + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments, + HrmpChannelId, OutboundHrmpMessage, SessionIndex, +}; fn create_candidate_commitments( para_id: ParaId, @@ -71,7 +70,7 @@ fn create_candidate_commitments( BoundedVec::truncate_from(unbounded) }; - let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42_u8; 1024])); + let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42u8; 1024])); CandidateCommitments:: { upward_messages, @@ -88,13 +87,18 @@ fn create_messages(msg_len: usize, n_msgs: usize) -> Vec> { vec![vec![best_number; msg_len]; n_msgs] } -#[benchmarks(where T: mq::Config + configuration::Config + initializer::Config)] -mod benchmarks { - use super::*; +benchmarks! { + where_clause { + where + T: mq::Config + configuration::Config + initializer::Config, + } + + enact_candidate { + let u in 0 .. 2; + let h in 0 .. 2; + let c in 0 .. 1; - #[benchmark] - fn enact_candidate(u: Linear<0, 2>, h: Linear<0, 2>, c: Linear<0, 1>) { - let para = 42_u32.into(); // not especially important. + let para = 42_u32.into(); // not especially important. 
let max_len = mq::MaxMessageLenOf::::get() as usize; @@ -102,7 +106,7 @@ mod benchmarks { let n_validators = config.max_validators.unwrap_or(500); let validators = generate_validator_pairs::(n_validators); - let session = SessionIndex::from(0_u32); + let session = SessionIndex::from(0u32); initializer::Pallet::::test_trigger_on_new_session( false, session, @@ -112,7 +116,7 @@ mod benchmarks { let backing_group_size = config.scheduler_params.max_validators_per_core.unwrap_or(5); let head_data = HeadData(vec![0xFF; 1024]); - let relay_parent_number = BlockNumberFor::::from(10_u32); + let relay_parent_number = BlockNumberFor::::from(10u32); let commitments = create_candidate_commitments::(para, head_data, max_len, u, h, c != 0); let backers = bitvec![u8, Lsb0; 1; backing_group_size as usize]; let availability_votes = bitvec![u8, Lsb0; 1; n_validators as usize]; @@ -131,26 +135,17 @@ mod benchmarks { ValidationCode(vec![1, 2, 3]).hash(), ); - let receipt = CommittedCandidateReceipt:: { descriptor, commitments }; + let receipt = CommittedCandidateReceipt:: { + descriptor, + commitments, + }; - Pallet::::receive_upward_messages(para, &vec![vec![0; max_len]; 1]); + Pallet::::receive_upward_messages(para, vec![vec![0; max_len]; 1].as_slice()); + } : { Pallet::::enact_candidate(relay_parent_number, receipt, backers, availability_votes, core_index, backing_group) } - #[block] - { - Pallet::::enact_candidate( - relay_parent_number, - receipt, - backers, - availability_votes, - core_index, - backing_group, - ); - } - } - - impl_benchmark_test_suite! 
{ + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), crate::mock::Test - } + ); } diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index b1ff5419470e..828c0b9bcef2 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -114,19 +114,3 @@ pub fn schedule_code_upgrade( pub fn set_current_head(id: ParaId, new_head: HeadData) { paras::Pallet::::set_current_head(id, new_head) } - -/// Ensure more initialization for `ParaId` when benchmarking. (e.g. open HRMP channels, ...) -#[cfg(feature = "runtime-benchmarks")] -pub trait EnsureForParachain { - fn ensure(para_id: ParaId); -} - -#[cfg(feature = "runtime-benchmarks")] -#[impl_trait_for_tuples::impl_for_tuples(30)] -impl EnsureForParachain for Tuple { - fn ensure(para: ParaId) { - for_tuples!( #( - Tuple::ensure(para); - )* ); - } -} diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index ee1990a7b618..d701e1f9bd80 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -421,7 +421,6 @@ impl assigner_coretime::Config for Test {} parameter_types! 
{ pub const BrokerId: u32 = 10u32; - pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } pub struct BrokerPot; @@ -438,7 +437,6 @@ impl coretime::Config for Test { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; - type MaxXcmTransactWeight = MaxXcmTransactWeight; type BrokerPotLocation = BrokerPot; type AssetTransactor = (); type AccountToLocation = (); diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 4d617cbb05bb..7bf8b833ed91 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -17,7 +17,7 @@ use super::*; use crate::configuration::HostConfiguration; use alloc::vec; -use frame_benchmarking::v2::*; +use frame_benchmarking::benchmarks; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use polkadot_primitives::{ HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, @@ -84,58 +84,41 @@ fn generate_disordered_actions_queue() { }); } -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn force_set_current_code(c: Linear) { +benchmarks! { + force_set_current_code { + let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); CurrentCodeHash::::insert(¶_id, new_code.hash()); generate_disordered_pruning::(); - - #[extrinsic_call] - _(RawOrigin::Root, para_id, new_code); - + }: _(RawOrigin::Root, para_id, new_code) + verify { assert_last_event::(Event::CurrentCodeUpdated(para_id).into()); } - - #[benchmark] - fn force_set_current_head(s: Linear) { + force_set_current_head { + let s in MIN_CODE_SIZE .. 
MAX_HEAD_DATA_SIZE; let new_head = HeadData(vec![0; s as usize]); let para_id = ParaId::from(1000); - - #[extrinsic_call] - _(RawOrigin::Root, para_id, new_head); - + }: _(RawOrigin::Root, para_id, new_head) + verify { assert_last_event::(Event::CurrentHeadUpdated(para_id).into()); } - - #[benchmark] - fn force_set_most_recent_context() { + force_set_most_recent_context { let para_id = ParaId::from(1000); let context = BlockNumberFor::::from(1000u32); - - #[extrinsic_call] - _(RawOrigin::Root, para_id, context); - } - - #[benchmark] - fn force_schedule_code_upgrade(c: Linear) { + }: _(RawOrigin::Root, para_id, context) + force_schedule_code_upgrade { + let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); let block = BlockNumberFor::::from(c); generate_disordered_upgrades::(); - - #[extrinsic_call] - _(RawOrigin::Root, para_id, new_code, block); - + }: _(RawOrigin::Root, para_id, new_code, block) + verify { assert_last_event::(Event::CodeUpgradeScheduled(para_id).into()); } - - #[benchmark] - fn force_note_new_head(s: Linear) { + force_note_new_head { + let s in MIN_CODE_SIZE .. 
MAX_HEAD_DATA_SIZE; let para_id = ParaId::from(1000); let new_head = HeadData(vec![0; s as usize]); let old_code_hash = ValidationCode(vec![0]).hash(); @@ -152,101 +135,70 @@ mod benchmarks { &config, UpgradeStrategy::SetGoAheadSignal, ); - - #[extrinsic_call] - _(RawOrigin::Root, para_id, new_head); - + }: _(RawOrigin::Root, para_id, new_head) + verify { assert_last_event::(Event::NewHeadNoted(para_id).into()); } - - #[benchmark] - fn force_queue_action() { + force_queue_action { let para_id = ParaId::from(1000); generate_disordered_actions_queue::(); - - #[extrinsic_call] - _(RawOrigin::Root, para_id); - - let next_session = - crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); + }: _(RawOrigin::Root, para_id) + verify { + let next_session = crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); assert_last_event::(Event::ActionQueued(para_id, next_session).into()); } - #[benchmark] - fn add_trusted_validation_code(c: Linear) { + add_trusted_validation_code { + let c in MIN_CODE_SIZE .. 
MAX_CODE_SIZE; let new_code = ValidationCode(vec![0; c as usize]); pvf_check::prepare_bypassing_bench::(new_code.clone()); + }: _(RawOrigin::Root, new_code) - #[extrinsic_call] - _(RawOrigin::Root, new_code); - } - - #[benchmark] - fn poke_unused_validation_code() { + poke_unused_validation_code { let code_hash = [0; 32].into(); + }: _(RawOrigin::Root, code_hash) - #[extrinsic_call] - _(RawOrigin::Root, code_hash); - } - - #[benchmark] - fn include_pvf_check_statement() { + include_pvf_check_statement { let (stmt, signature) = pvf_check::prepare_inclusion_bench::(); - - #[block] - { - let _ = - Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); - } + }: { + let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); } - #[benchmark] - fn include_pvf_check_statement_finalize_upgrade_accept() { - let (stmt, signature) = - pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Accept); - - #[block] - { - let _ = - Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); - } + include_pvf_check_statement_finalize_upgrade_accept { + let (stmt, signature) = pvf_check::prepare_finalization_bench::( + VoteCause::Upgrade, + VoteOutcome::Accept, + ); + }: { + let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); } - #[benchmark] - fn include_pvf_check_statement_finalize_upgrade_reject() { - let (stmt, signature) = - pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Reject); - - #[block] - { - let _ = - Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); - } + include_pvf_check_statement_finalize_upgrade_reject { + let (stmt, signature) = pvf_check::prepare_finalization_bench::( + VoteCause::Upgrade, + VoteOutcome::Reject, + ); + }: { + let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); } - #[benchmark] - fn 
include_pvf_check_statement_finalize_onboarding_accept() { - let (stmt, signature) = - pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Accept); - - #[block] - { - let _ = - Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); - } + include_pvf_check_statement_finalize_onboarding_accept { + let (stmt, signature) = pvf_check::prepare_finalization_bench::( + VoteCause::Onboarding, + VoteOutcome::Accept, + ); + }: { + let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); } - #[benchmark] - fn include_pvf_check_statement_finalize_onboarding_reject() { - let (stmt, signature) = - pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Reject); - - #[block] - { - let _ = - Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); - } + include_pvf_check_statement_finalize_onboarding_reject { + let (stmt, signature) = pvf_check::prepare_finalization_bench::( + VoteCause::Onboarding, + VoteOutcome::Reject, + ); + }: { + let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); } impl_benchmark_test_suite!( diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index e7f463566e3a..3b11c977edf3 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -6,51 +6,44 @@ description = "Rococo testnet Relay Chain runtime." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } serde_json = { features = ["alloc"], workspace = true } -smallvec = { workspace = true, default-features = true } static_assertions = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } -binary-merkle-tree = { workspace = true } -rococo-runtime-constants = { workspace = true } -sp-api = { workspace = true } -sp-arithmetic = { workspace = true } sp-authority-discovery = { workspace = true } -sp-block-builder = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true } sp-consensus-grandpa = { workspace = true } -sp-core = { workspace = true } +binary-merkle-tree = { workspace = true } +rococo-runtime-constants = { workspace = true } +sp-api = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-arithmetic = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } sp-mmr-primitives = { workspace = true } -sp-offchain = { workspace = true } sp-runtime = { workspace = true } -sp-session = { workspace = true } sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } sp-storage = { workspace = true } -sp-transaction-pool = { workspace = true } sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } +sp-keyring = { workspace = 
true } -frame-executive = { workspace = true } -frame-support = { features = ["tuples-96"], workspace = true } -frame-system = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true } -pallet-asset-rate = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } pallet-babe = { workspace = true } @@ -59,10 +52,15 @@ pallet-beefy = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-bounties = { workspace = true } pallet-child-bounties = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-collective = { workspace = true } pallet-conviction-voting = { workspace = true } pallet-democracy = { workspace = true } pallet-elections-phragmen = { workspace = true } +pallet-asset-rate = { workspace = true } +frame-executive = { workspace = true } pallet-grandpa = { workspace = true } pallet-identity = { workspace = true } pallet-indices = { workspace = true } @@ -79,48 +77,48 @@ pallet-proxy = { workspace = true } pallet-ranked-collective = { workspace = true } pallet-recovery = { workspace = true } pallet-referenda = { workspace = true } -pallet-root-testing = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-society = { workspace = true } -pallet-staking = { workspace = true } -pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } +frame-support = { features = ["tuples-96"], workspace = true } +pallet-staking = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } pallet-timestamp = { workspace = true } pallet-tips = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } 
pallet-utility = { workspace = true } pallet-vesting = { workspace = true } pallet-whitelist = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } +pallet-root-testing = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-metadata-hash-extension = { workspace = true } -frame-system-benchmarking = { optional = true, workspace = true } frame-try-runtime = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } hex-literal = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true } -polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } xcm-runtime-apis = { workspace = true } [dev-dependencies] +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } remote-externalities = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } separator = { workspace = true } serde_json = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true } -sp-trie = { workspace = true, default-features = true } -tiny-keccak = { features = ["keccak"], workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } [build-dependencies] @@ -277,7 +275,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git 
a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index cc62d230d2c0..1d0adac44af4 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -5,8 +5,6 @@ description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true @@ -20,9 +18,9 @@ smallvec = { workspace = true, default-features = true } frame-support = { workspace = true } polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -sp-core = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } +sp-core = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs index a96a509b0e4d..39c862660894 100644 --- a/polkadot/runtime/rococo/src/genesis_config_presets.rs +++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs @@ -23,7 +23,6 @@ use crate::{ #[cfg(not(feature = "std"))] use alloc::format; use alloc::{vec, vec::Vec}; -use frame_support::build_struct_json_patch; use polkadot_primitives::{AccountId, AssignmentId, SchedulerParams, ValidatorId}; use rococo_runtime_constants::currency::UNITS as ROC; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; @@ -129,9 +128,7 @@ fn default_parachains_host_configuration( allowed_ancestry_len: 2, }, node_features: bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | - 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize) | - 1u8 << (FeatureIndex::CandidateReceiptV2 as usize), + 1u8 << (FeatureIndex::ElasticScalingMVP as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -166,7 +163,7 @@ fn rococo_testnet_genesis( 
const ENDOWMENT: u128 = 1_000_000 * ROC; - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, @@ -188,8 +185,9 @@ fn rococo_testnet_genesis( ) }) .collect::>(), + ..Default::default() }, - babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, sudo: SudoConfig { key: Some(root_key.clone()) }, configuration: ConfigurationConfig { config: polkadot_runtime_parachains::configuration::HostConfiguration { @@ -200,8 +198,14 @@ fn rococo_testnet_genesis( ..default_parachains_host_configuration() }, }, - registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, - }) + registrar: RegistrarConfig { + next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } // staging_testnet @@ -423,7 +427,7 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { const ENDOWMENT: u128 = 1_000_000 * ROC; const STASH: u128 = 100 * ROC; - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -436,12 +440,19 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { .into_iter() .map(|x| (x.0.clone(), x.0, rococo_session_keys(x.2, x.3, x.4, x.5, x.6, x.7))) .collect::>(), + ..Default::default() }, - babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, sudo: SudoConfig { key: Some(endowed_accounts[0].clone()) }, configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, - registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, - 
}) + registrar: RegistrarConfig { + next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } //development diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index a5cb2eddfa0d..ab796edc54b1 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; -use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use polkadot_runtime_common::identity_migrator::OnReapIdentity; use rococo_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; @@ -88,10 +88,7 @@ where AccountId: Into<[u8; 32]> + Clone + Encode, { fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { - use crate::{ - impls::IdentityMigratorCalls::PokeDeposit, - weights::polkadot_runtime_common_identity_migrator::WeightInfo as MigratorWeights, - }; + use crate::impls::IdentityMigratorCalls::PokeDeposit; let total_to_send = Self::calculate_remote_deposit(fields, subs); @@ -144,7 +141,6 @@ where .into(); let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); - let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); // Actual program to execute on People Chain. let program: Xcm<()> = Xcm(vec![ @@ -161,11 +157,7 @@ where .into(), }, // Poke the deposit to reserve the appropriate amount on the parachain. 
- Transact { - origin_kind: OriginKind::Superuser, - fallback_max_weight: Some(remote_weight_limit), - call: poke.encode().into(), - }, + Transact { origin_kind: OriginKind::Superuser, call: poke.encode().into() }, ]); // send @@ -176,9 +168,4 @@ where )?; Ok(()) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { - crate::Dmp::make_parachain_reachable(1004); - } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 4034f8bc1431..96a97faa4750 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -20,17 +20,6 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] -#[cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), target_feature = "e"))] -// Allocate 2 MiB stack. -// -// TODO: A workaround. Invoke polkavm_derive::min_stack_size!() instead -// later on. -::core::arch::global_asm!( - ".pushsection .polkavm_min_stack_size,\"R\",@note\n", - ".4byte 2097152", - ".popsection\n", -); - extern crate alloc; use alloc::{ @@ -182,7 +171,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("rococo"), impl_name: alloc::borrow::Cow::Borrowed("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, @@ -778,7 +767,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -792,7 +780,6 @@ impl pallet_recovery::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type RuntimeCall = RuntimeCall; - type BlockNumberProvider = System; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; @@ -984,7 +971,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1112,7 +1098,6 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); - pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } pub struct BrokerPot; @@ -1136,7 +1121,6 @@ impl coretime::Config for Runtime { xcm_config::ThisNetwork, ::AccountId, >; - type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { @@ -2483,14 +2467,14 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - Dmp, + (), >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - Dmp, + (), > ); @@ -2549,7 +2533,7 @@ sp_api::impl_runtime_apis! 
{ ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - Dmp, + (), >; fn valid_destination() -> Result { Ok(AssetHub::get()) diff --git a/polkadot/runtime/rococo/src/tests.rs b/polkadot/runtime/rococo/src/tests.rs index 0b46caec5a35..01eaad87e342 100644 --- a/polkadot/runtime/rococo/src/tests.rs +++ b/polkadot/runtime/rococo/src/tests.rs @@ -22,7 +22,7 @@ use std::collections::HashSet; use crate::xcm_config::LocationConverter; use frame_support::traits::WhitelistedStorageKeys; use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use xcm_runtime_apis::conversions::LocationToAccountHelper; #[test] diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index b60165934f92..d5cf33515e6b 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `65a7f4d3191f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet -// --extrinsic=* // --chain=rococo-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --heap-pages=4096 // --no-storage-info -// --no-min-squares // --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,46 +56,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `3710` - // Minimum execution time: 37_787_000 picoseconds. - Weight::from_parts(39_345_000, 0) - .saturating_add(Weight::from_parts(0, 3710)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 25_521_000 picoseconds. 
+ Weight::from_parts(25_922_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `3710` - // Minimum execution time: 138_755_000 picoseconds. - Weight::from_parts(142_908_000, 0) - .saturating_add(Weight::from_parts(0, 3710)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 112_185_000 picoseconds. 
+ Weight::from_parts(115_991_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) @@ -104,54 +96,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 134_917_000 picoseconds. - Weight::from_parts(138_809_000, 0) - .saturating_add(Weight::from_parts(0, 3762)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `232` + // Estimated: `3697` + // Minimum execution time: 108_693_000 picoseconds. 
+ Weight::from_parts(111_853_000, 0) + .saturating_add(Weight::from_parts(0, 3697)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `3710` - // Minimum execution time: 141_303_000 picoseconds. - Weight::from_parts(144_640_000, 0) - .saturating_add(Weight::from_parts(0, 3710)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 113_040_000 picoseconds. 
+ Weight::from_parts(115_635_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 9_872_000 picoseconds. - Weight::from_parts(10_402_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) + // Estimated: `0` + // Minimum execution time: 6_979_000 picoseconds. + Weight::from_parts(7_342_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -159,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_312_000 picoseconds. - Weight::from_parts(8_867_000, 0) + // Minimum execution time: 7_144_000 picoseconds. + Weight::from_parts(7_297_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -168,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(2_800_000, 0) + // Minimum execution time: 1_886_000 picoseconds. 
+ Weight::from_parts(1_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -182,20 +165,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `3710` - // Minimum execution time: 45_426_000 picoseconds. - Weight::from_parts(48_021_000, 0) - .saturating_add(Weight::from_parts(0, 3710)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 31_238_000 picoseconds. 
+ Weight::from_parts(31_955_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -206,20 +187,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `425` - // Estimated: `3890` - // Minimum execution time: 50_854_000 picoseconds. - Weight::from_parts(52_044_000, 0) - .saturating_add(Weight::from_parts(0, 3890)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `360` + // Estimated: `3825` + // Minimum execution time: 37_237_000 picoseconds. + Weight::from_parts(38_569_000, 0) + .saturating_add(Weight::from_parts(0, 3825)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) @@ -228,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_566_000 picoseconds. - Weight::from_parts(2_771_000, 0) + // Minimum execution time: 1_884_000 picoseconds. 
+ Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:6 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `22` - // Estimated: `15862` - // Minimum execution time: 21_854_000 picoseconds. - Weight::from_parts(22_528_000, 0) - .saturating_add(Weight::from_parts(0, 15862)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13387` + // Minimum execution time: 16_048_000 picoseconds. + Weight::from_parts(16_617_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:6 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `26` - // Estimated: `15866` - // Minimum execution time: 21_821_000 picoseconds. - Weight::from_parts(22_368_000, 0) - .saturating_add(Weight::from_parts(0, 15866)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13391` + // Minimum execution time: 16_073_000 picoseconds. 
+ Weight::from_parts(16_672_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:7 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `18355` - // Minimum execution time: 25_795_000 picoseconds. - Weight::from_parts(26_284_000, 0) - .saturating_add(Weight::from_parts(0, 18355)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15880` + // Minimum execution time: 18_422_000 picoseconds. + Weight::from_parts(18_900_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -274,62 +253,62 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `244` - // Estimated: `6184` - // Minimum 
execution time: 33_182_000 picoseconds. - Weight::from_parts(34_506_000, 0) - .saturating_add(Weight::from_parts(0, 6184)) + // Measured: `216` + // Estimated: `6156` + // Minimum execution time: 30_373_000 picoseconds. + Weight::from_parts(30_972_000, 0) + .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 17_573_000 picoseconds. - Weight::from_parts(18_154_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `69` + // Estimated: `10959` + // Minimum execution time: 11_863_000 picoseconds. + Weight::from_parts(12_270_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `33` - // Estimated: `15873` - // Minimum execution time: 22_491_000 picoseconds. - Weight::from_parts(22_793_000, 0) - .saturating_add(Weight::from_parts(0, 15873)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13398` + // Minimum execution time: 16_733_000 picoseconds. 
+ Weight::from_parts(17_094_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:1) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `244` - // Estimated: `16084` - // Minimum execution time: 44_441_000 picoseconds. - Weight::from_parts(45_782_000, 0) - .saturating_add(Weight::from_parts(0, 16084)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `216` + // Estimated: `13581` + // Minimum execution time: 39_236_000 picoseconds. 
+ Weight::from_parts(40_587_000, 0) + .saturating_add(Weight::from_parts(0, 13581)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -339,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_809_000 picoseconds. - Weight::from_parts(2_960_000, 0) + // Minimum execution time: 2_145_000 picoseconds. + Weight::from_parts(2_255_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -351,24 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 26_248_000 picoseconds. - Weight::from_parts(26_996_000, 0) + // Minimum execution time: 22_518_000 picoseconds. + Weight::from_parts(22_926_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 40_299_000 picoseconds. - Weight::from_parts(41_396_000, 0) + // Minimum execution time: 34_438_000 picoseconds. 
+ Weight::from_parts(35_514_000, 0) .saturating_add(Weight::from_parts(0, 3488)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs index 1595a6dfbe4b..b62f36172baf 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs @@ -344,11 +344,4 @@ impl pallet_xcm_benchmarks::generic::WeightInfo for Wei Weight::from_parts(1_354_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 713_000 picoseconds. - Weight::from_parts(776_000, 0) - } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index eb27e5c5a897..007002bf27bb 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -24,7 +24,6 @@ use xcm::{latest::prelude::*, DoubleEncoded}; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::latest::AssetTransferFilter; /// Types of asset supported by the Rococo runtime. 
@@ -112,11 +111,7 @@ impl XcmWeightInfo for RococoXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::::transfer_reserve_asset()) } - fn transact( - _origin_kind: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_kind: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -291,19 +286,8 @@ impl XcmWeightInfo for RococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. } => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight - } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 2dc8880c8326..677640b45331 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vcatxqpx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -63,8 +63,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3746` - // Minimum execution time: 65_164_000 picoseconds. - Weight::from_parts(66_965_000, 3746) + // Minimum execution time: 64_284_000 picoseconds. + Weight::from_parts(65_590_000, 3746) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -72,22 +72,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 675_000 picoseconds. - Weight::from_parts(745_000, 0) + // Minimum execution time: 777_000 picoseconds. + Weight::from_parts(825_000, 0) } pub(crate) fn pay_fees() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_899_000 picoseconds. - Weight::from_parts(3_090_000, 0) - } - pub(crate) fn asset_claimer() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 1_543_000 picoseconds. + Weight::from_parts(1_627_000, 0) } /// Storage: `XcmPallet::Queries` (r:1 w:0) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -95,65 +88,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3465` - // Minimum execution time: 6_004_000 picoseconds. - Weight::from_parts(6_152_000, 3465) + // Minimum execution time: 5_995_000 picoseconds. + Weight::from_parts(6_151_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_296_000 picoseconds. - Weight::from_parts(7_533_000, 0) + // Minimum execution time: 7_567_000 picoseconds. 
+ Weight::from_parts(7_779_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_292_000 picoseconds. - Weight::from_parts(1_414_000, 0) + // Minimum execution time: 1_226_000 picoseconds. + Weight::from_parts(1_322_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 741_000 picoseconds. - Weight::from_parts(775_000, 0) + // Minimum execution time: 768_000 picoseconds. + Weight::from_parts(828_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 702_000 picoseconds. - Weight::from_parts(770_000, 0) + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(814_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 648_000 picoseconds. - Weight::from_parts(744_000, 0) + // Minimum execution time: 739_000 picoseconds. + Weight::from_parts(820_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 731_000 picoseconds. - Weight::from_parts(772_000, 0) - } - pub(crate) fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 790_000 picoseconds. - Weight::from_parts(843_000, 0) + // Minimum execution time: 806_000 picoseconds. + Weight::from_parts(849_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 647_000 picoseconds. - Weight::from_parts(731_000, 0) + // Minimum execution time: 782_000 picoseconds. 
+ Weight::from_parts(820_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -169,8 +155,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3746` - // Minimum execution time: 62_808_000 picoseconds. - Weight::from_parts(64_413_000, 3746) + // Minimum execution time: 61_410_000 picoseconds. + Weight::from_parts(62_813_000, 3746) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_541_000, 3488) + // Minimum execution time: 9_315_000 picoseconds. + Weight::from_parts(9_575_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +175,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(732_000, 0) + // Minimum execution time: 733_000 picoseconds. + Weight::from_parts(813_000, 0) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -206,8 +192,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_585_000 picoseconds. - Weight::from_parts(31_622_000, 3645) + // Minimum execution time: 30_641_000 picoseconds. + Weight::from_parts(31_822_000, 3645) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -217,44 +203,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_036_000 picoseconds. 
- Weight::from_parts(3_196_000, 0) + // Minimum execution time: 2_978_000 picoseconds. + Weight::from_parts(3_260_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_035_000 picoseconds. - Weight::from_parts(1_133_000, 0) + // Minimum execution time: 1_139_000 picoseconds. + Weight::from_parts(1_272_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 764_000 picoseconds. - Weight::from_parts(802_000, 0) + // Minimum execution time: 850_000 picoseconds. + Weight::from_parts(879_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 682_000 picoseconds. - Weight::from_parts(724_000, 0) + // Minimum execution time: 770_000 picoseconds. + Weight::from_parts(834_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(713_000, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(797_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 857_000 picoseconds. - Weight::from_parts(917_000, 0) + // Minimum execution time: 888_000 picoseconds. + Weight::from_parts(1_000_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -270,8 +256,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3746` - // Minimum execution time: 72_331_000 picoseconds. - Weight::from_parts(74_740_000, 3746) + // Minimum execution time: 72_138_000 picoseconds. 
+ Weight::from_parts(73_728_000, 3746) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -279,8 +265,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_963_000 picoseconds. - Weight::from_parts(9_183_000, 0) + // Minimum execution time: 8_482_000 picoseconds. + Weight::from_parts(8_667_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -296,8 +282,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3746` - // Minimum execution time: 62_555_000 picoseconds. - Weight::from_parts(64_824_000, 3746) + // Minimum execution time: 61_580_000 picoseconds. + Weight::from_parts(62_928_000, 3746) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -305,29 +291,29 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 740_000 picoseconds. - Weight::from_parts(773_000, 0) + // Minimum execution time: 807_000 picoseconds. + Weight::from_parts(844_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 678_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(808_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 656_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 740_000 picoseconds. + Weight::from_parts(810_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 672_000 picoseconds. 
- Weight::from_parts(725_000, 0) + // Minimum execution time: 752_000 picoseconds. + Weight::from_parts(786_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: @@ -336,4 +322,11 @@ impl WeightInfo { // Minimum execution time: 798_000 picoseconds. Weight::from_parts(845_000, 0) } + pub(crate) fn set_asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index f35bb53ac904..90a0285cd17b 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -16,59 +16,59 @@ log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } -frame-election-provider-support = { workspace = true } -sp-api = { workspace = true } sp-authority-discovery = { workspace = true } -sp-block-builder = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } +sp-api = { workspace = true } sp-inherents = { workspace = true } -sp-io = { workspace = true } -sp-mmr-primitives = { workspace = true } sp-offchain = { workspace = true } +sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-session = { workspace = true } sp-staking = { workspace = true } -sp-transaction-pool = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-session = { workspace = true } sp-version = { workspace = true } +frame-election-provider-support = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } -frame-executive = { workspace = true } -frame-support = { workspace = true } -frame-system = { 
workspace = true } -frame-system-rpc-runtime-api = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } pallet-babe = { workspace = true } pallet-balances = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +frame-executive = { workspace = true } pallet-grandpa = { workspace = true } pallet-indices = { workspace = true } pallet-offences = { workspace = true } pallet-session = { workspace = true } +frame-support = { workspace = true } pallet-staking = { workspace = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-sudo = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +test-runtime-constants = { workspace = true } pallet-timestamp = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-sudo = { workspace = true } pallet-vesting = { workspace = true } -test-runtime-constants = { workspace = true } -pallet-xcm = { workspace = true } -polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } +pallet-xcm = { workspace = true } polkadot-runtime-parachains = { workspace = true } -xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } sp-keyring = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } -tiny-keccak = { features = ["keccak"], workspace = true } +serde_json = { workspace = true, default-features = true } [build-dependencies] 
substrate-wasm-builder = { workspace = true, default-features = true } @@ -154,5 +154,4 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index d4031f7ac57a..d2ed5abb6ed1 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -395,7 +395,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { @@ -584,7 +584,6 @@ impl parachains_paras::Config for Runtime { parameter_types! { pub const BrokerId: u32 = 10u32; - pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } pub struct BrokerPot; @@ -658,7 +657,6 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; - type MaxXcmTransactWeight = MaxXcmTransactWeight; type BrokerPotLocation = BrokerPot; type AssetTransactor = (); type AccountToLocation = (); diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index e945e64e7fc0..f94301baab09 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -6,8 +6,6 @@ description = "Westend testnet Relay Chain runtime." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -15,36 +13,36 @@ workspace = true [dependencies] bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } serde_json = { features = ["alloc"], workspace = true } smallvec = { workspace = true, default-features = true } -binary-merkle-tree = { workspace = true } -sp-api = { workspace = true } -sp-application-crypto = { workspace = true } -sp-arithmetic = { workspace = true } sp-authority-discovery = { workspace = true } -sp-block-builder = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true } sp-consensus-grandpa = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } +binary-merkle-tree = { workspace = true } sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-genesis-builder = { workspace = true } sp-io = { workspace = true } -sp-keyring = { workspace = true } sp-mmr-primitives = { workspace = true } -sp-npos-elections = { workspace = true } -sp-offchain = { workspace = true } sp-runtime = { workspace = true } -sp-session = { workspace = true } sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } sp-storage = { workspace = true } -sp-transaction-pool = { workspace = true } sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } +sp-npos-elections = { workspace = true } +sp-keyring = { workspace = true } 
frame-election-provider-support = { workspace = true } frame-executive = { workspace = true } @@ -52,6 +50,7 @@ frame-metadata-hash-extension = { workspace = true } frame-support = { features = ["experimental", "tuples-96"], workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } +westend-runtime-constants = { workspace = true } pallet-asset-rate = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } @@ -61,11 +60,9 @@ pallet-balances = { workspace = true } pallet-beefy = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-collective = { workspace = true } -pallet-conviction-voting = { workspace = true } -pallet-delegated-staking = { workspace = true } pallet-democracy = { workspace = true } -pallet-election-provider-multi-phase = { workspace = true } pallet-elections-phragmen = { workspace = true } +pallet-election-provider-multi-phase = { workspace = true } pallet-fast-unstake = { workspace = true } pallet-grandpa = { workspace = true } pallet-identity = { workspace = true } @@ -76,59 +73,60 @@ pallet-migrations = { workspace = true } pallet-mmr = { workspace = true } pallet-multisig = { workspace = true } pallet-nomination-pools = { workspace = true } -pallet-nomination-pools-runtime-api = { workspace = true } +pallet-conviction-voting = { workspace = true } pallet-offences = { workspace = true } pallet-parameters = { workspace = true } pallet-preimage = { workspace = true } pallet-proxy = { workspace = true } pallet-recovery = { workspace = true } pallet-referenda = { workspace = true } -pallet-root-testing = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-society = { workspace = true } pallet-staking = { workspace = true } pallet-staking-runtime-api = { workspace = true } +pallet-delegated-staking = { workspace = true } pallet-state-trie-migration = { workspace = true } 
pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } pallet-vesting = { workspace = true } pallet-whitelist = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } -westend-runtime-constants = { workspace = true } +pallet-root-testing = { workspace = true } frame-benchmarking = { optional = true, workspace = true } -frame-system-benchmarking = { optional = true, workspace = true } frame-try-runtime = { optional = true, workspace = true } -hex-literal = { workspace = true, default-features = true } +frame-system-benchmarking = { optional = true, workspace = true } pallet-election-provider-support-benchmarking = { optional = true, workspace = true } pallet-nomination-pools-benchmarking = { optional = true, workspace = true } pallet-offences-benchmarking = { optional = true, workspace = true } pallet-session-benchmarking = { optional = true, workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true } -polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } xcm-runtime-apis = { workspace = true } [dev-dependencies] approx = { workspace = true } -remote-externalities = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-tracing = { 
workspace = true } tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +remote-externalities = { workspace = true, default-features = true } tokio = { features = ["macros"], workspace = true, default-features = true } +sp-tracing = { workspace = true } [build-dependencies] substrate-wasm-builder = { workspace = true, default-features = true } @@ -298,7 +296,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index f3dbcc309ee1..27d5b19b8e77 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -5,8 +5,6 @@ description = "Constants used throughout the Westend network." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true @@ -20,9 +18,9 @@ smallvec = { workspace = true, default-features = true } frame-support = { workspace = true } polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } -sp-core = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } +sp-core = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs index ea5aff554e8c..b074d54fb582 100644 --- a/polkadot/runtime/westend/src/genesis_config_presets.rs +++ b/polkadot/runtime/westend/src/genesis_config_presets.rs @@ -133,8 +133,7 @@ fn default_parachains_host_configuration( }, node_features: bitvec::vec::BitVec::from_element( 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | - 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize) | - 1u8 << (FeatureIndex::CandidateReceiptV2 as usize), + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -223,7 +222,7 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { // // SECRET_SEED="slow awkward present example safe bundle science ocean cradle word tennis earn" // subkey inspect -n polkadot "$SECRET_SEED" - let endowed_accounts: Vec = vec![ + let endowed_accounts = vec![ // 15S75FkhCWEowEGfxWwVfrW3LQuy8w8PNhVmrzfsVhCMjUh1 hex!["c416837e232d9603e83162ef4bda08e61580eeefe60fe92fc044aa508559ae42"].into(), ]; @@ -339,7 +338,7 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { const ENDOWMENT: u128 = 1_000_000 * WND; const STASH: u128 = 100 * WND; - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts 
.iter() @@ -365,6 +364,7 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { ) }) .collect::>(), + ..Default::default() }, staking: StakingConfig { validator_count: 50, @@ -376,12 +376,19 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), force_era: Forcing::ForceNone, slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() }, - babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, sudo: SudoConfig { key: Some(endowed_accounts[0].clone()) }, configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, - registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, - }) + registrar: RegistrarConfig { + next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, + ..Default::default() + }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } //development diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index 0e0d345a0ed4..d7281dad56d4 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; -use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use polkadot_runtime_common::identity_migrator::OnReapIdentity; use westend_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; @@ -88,10 +88,7 @@ where AccountId: Into<[u8; 32]> + Clone + Encode, { fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { - use crate::{ - impls::IdentityMigratorCalls::PokeDeposit, - 
weights::polkadot_runtime_common_identity_migrator::WeightInfo as MigratorWeights, - }; + use crate::impls::IdentityMigratorCalls::PokeDeposit; let total_to_send = Self::calculate_remote_deposit(fields, subs); @@ -144,7 +141,6 @@ where .into(); let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); - let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); // Actual program to execute on People Chain. let program: Xcm<()> = Xcm(vec![ @@ -161,11 +157,7 @@ where .into(), }, // Poke the deposit to reserve the appropriate amount on the parachain. - Transact { - origin_kind: OriginKind::Superuser, - call: poke.encode().into(), - fallback_max_weight: Some(remote_weight_limit), - }, + Transact { origin_kind: OriginKind::Superuser, call: poke.encode().into() }, ]); // send @@ -176,9 +168,4 @@ where )?; Ok(()) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { - crate::Dmp::make_parachain_reachable(1004); - } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index cd8eb4d2505a..4c04af111f81 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -172,10 +172,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westend"), impl_name: alloc::borrow::Cow::Borrowed("parity-westend"), authoring_version: 2, - spec_version: 1_017_001, + spec_version: 1_016_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 27, + transaction_version: 26, system_version: 1, }; @@ -543,7 +543,7 @@ impl Get for MaybeSignedPhase { fn get() -> u32 { // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test // the fallback unsigned phase is able to compute elections on Westend. 
- if pallet_staking::CurrentEra::::get().unwrap_or(1) % 28 == 0 { + if Staking::current_era().unwrap_or(1) % 28 == 0 { 0 } else { SignedPhase::get() @@ -755,7 +755,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; - type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1004,7 +1004,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1018,7 +1017,6 @@ impl pallet_recovery::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type RuntimeCall = RuntimeCall; - type BlockNumberProvider = System; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; @@ -1206,7 +1204,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1327,7 +1324,6 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); - pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } pub struct BrokerPot; @@ -1351,7 +1347,6 @@ impl coretime::Config for Runtime { xcm_config::ThisNetwork, ::AccountId, >; - type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! 
{ @@ -1841,7 +1836,6 @@ pub mod migrations { >, parachains_shared::migration::MigrateToV1, parachains_scheduler::migration::MigrateV2ToV3, - pallet_staking::migrations::v16::MigrateV15ToV16, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -2640,14 +2634,14 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - Dmp, + (), >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - Dmp, + (), > ); @@ -2713,7 +2707,7 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - Dmp, + (), >; fn valid_destination() -> Result { Ok(AssetHub::get()) @@ -2802,9 +2796,8 @@ sp_api::impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - let origin = Location::new(0, [Parachain(1000)]); - let target = Location::new(0, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); - Ok((origin, target)) + // The XCM executor of Westend doesn't have a configured `Aliasers` + Err(BenchmarkError::Skip) } } diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index fcdaf7ff2de6..02fd6b61496b 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -23,7 +23,7 @@ use approx::assert_relative_eq; use frame_support::traits::WhitelistedStorageKeys; use pallet_staking::EraPayout; use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::AccountKeyring::Alice; use xcm_runtime_apis::conversions::LocationToAccountHelper; const MILLISECONDS_PER_HOUR: u64 = 60 * 60 * 1000; diff --git a/polkadot/runtime/westend/src/weights/pallet_balances.rs b/polkadot/runtime/westend/src/weights/pallet_balances.rs index deaf8840462b..5e91f31920ca 100644 --- 
a/polkadot/runtime/westend/src/weights/pallet_balances.rs +++ b/polkadot/runtime/westend/src/weights/pallet_balances.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `95c137a642c3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --extrinsic=* -// --chain=westend-dev -// --pallet=pallet_balances -// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt -// --output=./polkadot/runtime/westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_balances +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,8 +54,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 51_474_000 picoseconds. - Weight::from_parts(52_840_000, 0) + // Minimum execution time: 43_248_000 picoseconds. 
+ Weight::from_parts(43_872_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -68,8 +66,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 39_875_000 picoseconds. - Weight::from_parts(41_408_000, 0) + // Minimum execution time: 33_990_000 picoseconds. + Weight::from_parts(34_693_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -80,8 +78,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 19_614_000 picoseconds. - Weight::from_parts(20_194_000, 0) + // Minimum execution time: 12_681_000 picoseconds. + Weight::from_parts(13_183_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -92,8 +90,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 27_430_000 picoseconds. - Weight::from_parts(28_151_000, 0) + // Minimum execution time: 17_474_000 picoseconds. + Weight::from_parts(18_063_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -104,8 +102,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 54_131_000 picoseconds. - Weight::from_parts(54_810_000, 0) + // Minimum execution time: 45_699_000 picoseconds. 
+ Weight::from_parts(46_099_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -116,8 +114,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 48_692_000 picoseconds. - Weight::from_parts(51_416_000, 0) + // Minimum execution time: 42_453_000 picoseconds. + Weight::from_parts(43_133_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -128,8 +126,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_604_000 picoseconds. - Weight::from_parts(23_336_000, 0) + // Minimum execution time: 15_066_000 picoseconds. + Weight::from_parts(15_605_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -141,11 +139,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (136 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 18_118_000 picoseconds. - Weight::from_parts(18_352_000, 0) + // Minimum execution time: 14_180_000 picoseconds. 
+ Weight::from_parts(14_598_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 14_688 - .saturating_add(Weight::from_parts(15_412_440, 0).saturating_mul(u.into())) + // Standard Error: 13_221 + .saturating_add(Weight::from_parts(13_422_901, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -154,24 +152,24 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_779_000 picoseconds. - Weight::from_parts(7_246_000, 0) + // Minimum execution time: 5_130_000 picoseconds. + Weight::from_parts(5_257_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_935_000 picoseconds. - Weight::from_parts(32_251_000, 0) + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_785_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_002_000 picoseconds. - Weight::from_parts(21_760_000, 0) + // Minimum execution time: 17_797_000 picoseconds. + Weight::from_parts(18_103_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index e2c0232139fb..10725cecf249 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `3a528d69c69e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --extrinsic=* -// --chain=westend-dev -// --pallet=pallet_xcm -// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt -// --output=./polkadot/runtime/westend/src/weights -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,46 +54,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `212` - // Estimated: `3677` - // Minimum execution time: 41_425_000 picoseconds. 
- Weight::from_parts(43_275_000, 0) - .saturating_add(Weight::from_parts(0, 3677)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `315` + // Measured: `250` // Estimated: `6196` - // Minimum execution time: 145_227_000 picoseconds. - Weight::from_parts(151_656_000, 0) + // Minimum execution time: 113_140_000 picoseconds. 
+ Weight::from_parts(116_204_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) @@ -104,54 +94,47 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `302` // Estimated: `6196` - // Minimum execution time: 141_439_000 picoseconds. - Weight::from_parts(146_252_000, 0) + // Minimum execution time: 108_571_000 picoseconds. 
+ Weight::from_parts(110_650_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `315` + // Measured: `250` // Estimated: `6196` - // Minimum execution time: 146_651_000 picoseconds. - Weight::from_parts(150_134_000, 0) + // Minimum execution time: 111_836_000 picoseconds. 
+ Weight::from_parts(114_435_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 9_663_000 picoseconds. - Weight::from_parts(10_012_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -159,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_113_000 picoseconds. - Weight::from_parts(8_469_000, 0) + // Minimum execution time: 7_160_000 picoseconds. + Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -168,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_493_000 picoseconds. - Weight::from_parts(2_630_000, 0) + // Minimum execution time: 1_934_000 picoseconds. 
+ Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -182,20 +165,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `212` - // Estimated: `3677` - // Minimum execution time: 47_890_000 picoseconds. - Weight::from_parts(49_994_000, 0) - .saturating_add(Weight::from_parts(0, 3677)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 31_123_000 picoseconds. 
+ Weight::from_parts(31_798_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -206,20 +187,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `392` - // Estimated: `3857` - // Minimum execution time: 52_967_000 picoseconds. - Weight::from_parts(55_345_000, 0) - .saturating_add(Weight::from_parts(0, 3857)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `327` + // Estimated: `3792` + // Minimum execution time: 35_175_000 picoseconds. + Weight::from_parts(36_098_000, 0) + .saturating_add(Weight::from_parts(0, 3792)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) @@ -228,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_451_000 picoseconds. - Weight::from_parts(2_623_000, 0) + // Minimum execution time: 1_974_000 picoseconds. 
+ Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:6 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `22` - // Estimated: `15862` - // Minimum execution time: 22_292_000 picoseconds. - Weight::from_parts(22_860_000, 0) - .saturating_add(Weight::from_parts(0, 15862)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13387` + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:6 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `26` - // Estimated: `15866` - // Minimum execution time: 21_847_000 picoseconds. - Weight::from_parts(22_419_000, 0) - .saturating_add(Weight::from_parts(0, 15866)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13391` + // Minimum execution time: 16_937_000 picoseconds. 
+ Weight::from_parts(17_447_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:7 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `18355` - // Minimum execution time: 24_764_000 picoseconds. - Weight::from_parts(25_873_000, 0) - .saturating_add(Weight::from_parts(0, 18355)) - .saturating_add(T::DbWeight::get().reads(7)) + // Estimated: `15880` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -274,62 +253,62 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `211` - // Estimated: `6151` - // Minimum 
execution time: 36_482_000 picoseconds. - Weight::from_parts(37_672_000, 0) - .saturating_add(Weight::from_parts(0, 6151)) + // Measured: `183` + // Estimated: `6123` + // Minimum execution time: 30_699_000 picoseconds. + Weight::from_parts(31_537_000, 0) + .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 17_580_000 picoseconds. - Weight::from_parts(17_908_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `69` + // Estimated: `10959` + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_670_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `33` - // Estimated: `15873` - // Minimum execution time: 21_946_000 picoseconds. - Weight::from_parts(22_548_000, 0) - .saturating_add(Weight::from_parts(0, 15873)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `13398` + // Minimum execution time: 17_129_000 picoseconds. 
+ Weight::from_parts(17_668_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:1) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Paras::Heads` (r:1 w:0) - /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `211` - // Estimated: `16051` - // Minimum execution time: 47_261_000 picoseconds. - Weight::from_parts(48_970_000, 0) - .saturating_add(Weight::from_parts(0, 16051)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `183` + // Estimated: `13548` + // Minimum execution time: 39_960_000 picoseconds. 
+ Weight::from_parts(41_068_000, 0) + .saturating_add(Weight::from_parts(0, 13548)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -339,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_794_000 picoseconds. - Weight::from_parts(2_895_000, 0) + // Minimum execution time: 2_333_000 picoseconds. + Weight::from_parts(2_504_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -351,24 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 25_946_000 picoseconds. - Weight::from_parts(26_503_000, 0) + // Minimum execution time: 22_932_000 picoseconds. + Weight::from_parts(23_307_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) - /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 40_780_000 picoseconds. - Weight::from_parts(41_910_000, 0) + // Minimum execution time: 34_558_000 picoseconds. 
+ Weight::from_parts(35_299_000, 0) .saturating_add(Weight::from_parts(0, 3488)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs index f4dbca0f29ff..a035ea2b0b5e 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs @@ -85,7 +85,7 @@ impl polkadot_runtime_parachains::disputes::slashing::W /// Storage: Staking UnappliedSlashes (r:1 w:1) /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) /// The range of component `n` is `[4, 300]`. - fn report_dispute_lost_unsigned(n: u32, ) -> Weight { + fn report_dispute_lost(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4531 + n * (189 ±0)` // Estimated: `7843 + n * (192 ±0)` diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index a5fb82a66837..e5f4a0d7ca8e 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -27,7 +27,6 @@ use xcm::{ use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_runtime::BoundedVec; use xcm::latest::AssetTransferFilter; /// Types of asset supported by the westend runtime. 
@@ -115,11 +114,7 @@ impl XcmWeightInfo for WestendXcmWeight { fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::::transfer_reserve_asset()) } - fn transact( - _origin_kind: &OriginKind, - _fallback_max_weight: &Option, - _call: &DoubleEncoded, - ) -> Weight { + fn transact(_origin_kind: &OriginKind, _call: &DoubleEncoded) -> Weight { XcmGeneric::::transact() } fn hrmp_new_channel_open_request( @@ -209,17 +204,11 @@ impl XcmWeightInfo for WestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } - fn set_hints(hints: &BoundedVec) -> Weight { - let mut weight = Weight::zero(); - for hint in hints { - match hint { - AssetClaimer { .. } => { - weight = weight.saturating_add(XcmGeneric::::asset_claimer()); - }, - } - } - weight + + fn set_asset_claimer(_location: &Location) -> Weight { + XcmGeneric::::set_asset_claimer() } + fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -299,14 +288,12 @@ impl XcmWeightInfo for WestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - XcmGeneric::::alias_origin() + // XCM Executor does not currently support alias origin operations + Weight::MAX } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } - fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { - XcmGeneric::::execute_with_origin() - } } #[test] diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4e10e72356ab..2ad1cd6359a6 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,28 +17,26 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-09-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `aa8403b52523`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --extrinsic=* -// --chain=westend-dev -// --pallet=pallet_xcm_benchmarks::generic -// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt -// --output=./polkadot/runtime/westend/src/weights/xcm -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --template=polkadot/xcm/pallet-xcm-benchmarks/template.hbs -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -65,8 +63,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 74_868_000 picoseconds. - Weight::from_parts(77_531_000, 6196) + // Minimum execution time: 67_813_000 picoseconds. + Weight::from_parts(69_357_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -74,22 +72,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 688_000 picoseconds. 
- Weight::from_parts(733_000, 0) + // Minimum execution time: 716_000 picoseconds. + Weight::from_parts(780_000, 0) } pub(crate) fn pay_fees() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_491_000 picoseconds. - Weight::from_parts(3_667_000, 0) + // Minimum execution time: 1_601_000 picoseconds. + Weight::from_parts(1_680_000, 0) } - pub(crate) fn asset_claimer() -> Weight { + pub(crate) fn set_asset_claimer() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 757_000 picoseconds. - Weight::from_parts(804_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) } /// Storage: `XcmPallet::Queries` (r:1 w:0) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -97,65 +95,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3465` - // Minimum execution time: 6_322_000 picoseconds. - Weight::from_parts(6_565_000, 3465) + // Minimum execution time: 6_574_000 picoseconds. + Weight::from_parts(6_790_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_841_000 picoseconds. - Weight::from_parts(8_240_000, 0) + // Minimum execution time: 7_232_000 picoseconds. + Weight::from_parts(7_422_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_327_000 picoseconds. - Weight::from_parts(1_460_000, 0) + // Minimum execution time: 1_180_000 picoseconds. + Weight::from_parts(1_250_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 680_000 picoseconds. 
- Weight::from_parts(752_000, 0) + // Minimum execution time: 702_000 picoseconds. + Weight::from_parts(766_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 712_000 picoseconds. - Weight::from_parts(764_000, 0) + // Minimum execution time: 700_000 picoseconds. + Weight::from_parts(757_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 663_000 picoseconds. - Weight::from_parts(712_000, 0) + // Minimum execution time: 686_000 picoseconds. + Weight::from_parts(751_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 756_000 picoseconds. - Weight::from_parts(801_000, 0) - } - pub(crate) fn execute_with_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 773_000 picoseconds. - Weight::from_parts(822_000, 0) + // Minimum execution time: 705_000 picoseconds. + Weight::from_parts(765_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(750_000, 0) + // Minimum execution time: 687_000 picoseconds. + Weight::from_parts(741_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -171,8 +162,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 73_173_000 picoseconds. - Weight::from_parts(75_569_000, 6196) + // Minimum execution time: 65_398_000 picoseconds. 
+ Weight::from_parts(67_140_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -182,8 +173,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 9_851_000 picoseconds. - Weight::from_parts(10_087_000, 3488) + // Minimum execution time: 9_653_000 picoseconds. + Weight::from_parts(9_944_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +182,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 673_000 picoseconds. - Weight::from_parts(744_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(759_000, 0) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -208,8 +199,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 35_714_000 picoseconds. - Weight::from_parts(36_987_000, 3612) + // Minimum execution time: 31_300_000 picoseconds. + Weight::from_parts(31_989_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -219,44 +210,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_128_000 picoseconds. - Weight::from_parts(3_364_000, 0) + // Minimum execution time: 2_863_000 picoseconds. + Weight::from_parts(3_027_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_070_000 picoseconds. - Weight::from_parts(1_188_000, 0) + // Minimum execution time: 1_046_000 picoseconds. 
+ Weight::from_parts(1_125_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 764_000 picoseconds. - Weight::from_parts(863_000, 0) + // Minimum execution time: 811_000 picoseconds. + Weight::from_parts(871_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 675_000 picoseconds. - Weight::from_parts(755_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(741_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 666_000 picoseconds. - Weight::from_parts(745_000, 0) + // Minimum execution time: 687_000 picoseconds. + Weight::from_parts(741_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 838_000 picoseconds. - Weight::from_parts(918_000, 0) + // Minimum execution time: 861_000 picoseconds. + Weight::from_parts(931_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -272,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 82_721_000 picoseconds. - Weight::from_parts(85_411_000, 6196) + // Minimum execution time: 74_622_000 picoseconds. + Weight::from_parts(77_059_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -281,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_138_000 picoseconds. - Weight::from_parts(8_344_000, 0) + // Minimum execution time: 7_603_000 picoseconds. 
+ Weight::from_parts(7_871_000, 0) } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -298,8 +289,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 73_617_000 picoseconds. - Weight::from_parts(76_999_000, 6196) + // Minimum execution time: 65_617_000 picoseconds. + Weight::from_parts(66_719_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -307,42 +298,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 714_000 picoseconds. - Weight::from_parts(806_000, 0) + // Minimum execution time: 738_000 picoseconds. + Weight::from_parts(779_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 676_000 picoseconds. - Weight::from_parts(720_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(755_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 666_000 picoseconds. - Weight::from_parts(731_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(722_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 662_000 picoseconds. - Weight::from_parts(696_000, 0) + // Minimum execution time: 694_000 picoseconds. + Weight::from_parts(738_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 693_000 picoseconds. 
- Weight::from_parts(760_000, 0) - } - pub(crate) fn alias_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 705_000 picoseconds. - Weight::from_parts(746_000, 0) + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(776_000, 0) } } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 3f6a7304c8a9..f8bb2676de3f 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -38,14 +38,13 @@ use westend_runtime_constants::{ }; use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, - AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, - FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsChildSystemParachain, - IsConcrete, MintLocation, OriginToPluralityVoice, SendXcmFeeToAccount, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, + ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, + FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SendXcmFeeToAccount, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; @@ -184,11 +183,6 @@ 
pub type Barrier = TrailingSetTopicAsId<( /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = (SystemParachains, Equals, LocalPlurality); -/// We let locations alias into child locations of their own. -/// This is a very simple aliasing rule, mimicking the behaviour of -/// the `DescendOrigin` instruction. -pub type Aliasers = AliasChildLocation; - pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -222,7 +216,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Aliasers; + type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 1155600f3d0c..53ea0b74463b 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -5,14 +5,12 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." 
-homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -gum = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 4ab6e27d2c74..e3c470fcdeec 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -62,6 +62,14 @@ pub trait Context { fn get_group_size(&self, group: &Self::GroupId) -> Option; } +/// Table configuration. +pub struct Config { + /// When this is true, the table will allow multiple seconded candidates + /// per authority. This flag means that higher-level code is responsible for + /// bounding the number of candidates. + pub allow_multiple_seconded: bool, +} + /// Statements circulated among peers. #[derive(PartialEq, Eq, Debug, Clone, Encode, Decode)] pub enum Statement { @@ -135,6 +143,15 @@ impl DoubleSign { } } +/// Misbehavior: declaring multiple candidates. +#[derive(PartialEq, Eq, Debug, Clone)] +pub struct MultipleCandidates { + /// The first candidate seen. + pub first: (Candidate, Signature), + /// The second candidate seen. + pub second: (Candidate, Signature), +} + /// Misbehavior: submitted statement for wrong group. #[derive(PartialEq, Eq, Debug, Clone)] pub struct UnauthorizedStatement { @@ -148,6 +165,8 @@ pub struct UnauthorizedStatement { pub enum Misbehavior { /// Voted invalid and valid on validity. ValidityDoubleVote(ValidityDoubleVote), + /// Submitted multiple candidates. + MultipleCandidates(MultipleCandidates), /// Submitted a message that was unauthorized. UnauthorizedStatement(UnauthorizedStatement), /// Submitted two valid signatures for the same message. 
@@ -281,14 +300,17 @@ pub struct Table { authority_data: HashMap>, detected_misbehavior: HashMap>>, candidate_votes: HashMap>, + config: Config, } impl Table { - pub fn new() -> Self { + /// Create a new `Table` from a `Config`. + pub fn new(config: Config) -> Self { Table { authority_data: HashMap::default(), detected_misbehavior: HashMap::default(), candidate_votes: HashMap::default(), + config, } } @@ -386,7 +408,33 @@ impl Table { // if digest is different, fetch candidate and // note misbehavior. let existing = occ.get_mut(); - if existing.proposals.iter().any(|(ref od, _)| od == &digest) { + + if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 { + let (old_digest, old_sig) = &existing.proposals[0]; + + if old_digest != &digest { + const EXISTENCE_PROOF: &str = + "when proposal first received from authority, candidate \ + votes entry is created. proposal here is `Some`, therefore \ + candidate votes entry exists; qed"; + + let old_candidate = self + .candidate_votes + .get(old_digest) + .expect(EXISTENCE_PROOF) + .candidate + .clone(); + + return Err(Misbehavior::MultipleCandidates(MultipleCandidates { + first: (old_candidate, old_sig.clone()), + second: (candidate, signature.clone()), + })) + } + + false + } else if self.config.allow_multiple_seconded && + existing.proposals.iter().any(|(ref od, _)| od == &digest) + { false } else { existing.proposals.push((digest.clone(), signature.clone())); @@ -543,6 +591,14 @@ mod tests { use super::*; use std::collections::HashMap; + fn create_single_seconded() -> Table { + Table::new(Config { allow_multiple_seconded: false }) + } + + fn create_many_seconded() -> Table { + Table::new(Config { allow_multiple_seconded: true }) + } + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] struct AuthorityId(usize); @@ -590,6 +646,42 @@ mod tests { } } + #[test] + fn submitting_two_candidates_can_be_misbehavior() { + let context = TestContext { + authorities: { + let mut map = HashMap::new(); + 
map.insert(AuthorityId(1), GroupId(2)); + map + }, + }; + + let mut table = create_single_seconded(); + let statement_a = SignedStatement { + statement: Statement::Seconded(Candidate(2, 100)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + let statement_b = SignedStatement { + statement: Statement::Seconded(Candidate(2, 999)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + table.import_statement(&context, GroupId(2), statement_a); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + + table.import_statement(&context, GroupId(2), statement_b); + assert_eq!( + table.detected_misbehavior[&AuthorityId(1)][0], + Misbehavior::MultipleCandidates(MultipleCandidates { + first: (Candidate(2, 100), Signature(1)), + second: (Candidate(2, 999), Signature(1)), + }) + ); + } + #[test] fn submitting_two_candidates_can_be_allowed() { let context = TestContext { @@ -600,7 +692,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_many_seconded(); let statement_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -630,7 +722,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -662,7 +754,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let candidate_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), @@ -706,7 +798,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -736,7 +828,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: 
Signature(1), @@ -804,7 +896,7 @@ mod tests { }; // have 2/3 validity guarantors note validity. - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -838,7 +930,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -865,7 +957,7 @@ mod tests { }, }; - let mut table = Table::new(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index c8ad28437f88..68febf76feb3 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -29,7 +29,7 @@ pub mod generic; -pub use generic::{Context, Table}; +pub use generic::{Config, Context, Table}; /// Concrete instantiations suitable for v2 primitives. 
pub mod v2 { diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 3006d8325ef9..16205b0f51f5 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -5,8 +5,6 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" -homepage.workspace = true -repository.workspace = true [lints] workspace = true diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index 1a6c23e0518e..206ca8cf19a9 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -13,10 +13,10 @@ workspace = true westend-runtime = { workspace = true } westend-runtime-constants = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } pallet-bags-list-remote-tests = { workspace = true } -sp-core = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index e90354e4e6ac..86c7067ad6fa 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -5,8 +5,6 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -14,23 +12,23 @@ workspace = true [dependencies] array-bytes = { workspace = true, default-features = true } bounded-collections = { features = ["serde"], workspace = true } -codec = { features = ["derive", "max-encoded-len"], workspace = true } derivative = { features = 
["use_core"], workspace = true } -environmental = { workspace = true } -frame-support = { workspace = true } -hex-literal = { workspace = true, default-features = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } -schemars = { default-features = true, optional = true, workspace = true } -serde = { features = ["alloc", "derive", "rc"], workspace = true } sp-runtime = { workspace = true } sp-weights = { features = ["serde"], workspace = true } +serde = { features = ["alloc", "derive", "rc"], workspace = true } +schemars = { default-features = true, optional = true, workspace = true } xcm-procedural = { workspace = true, default-features = true } +environmental = { workspace = true } +hex-literal = { workspace = true, default-features = true } +frame-support = { workspace = true } [dev-dependencies] -hex = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } [features] default = ["std"] @@ -51,7 +49,3 @@ json-schema = [ "dep:schemars", "sp-weights/json-schema", ] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml index 6fa7ea9a23a9..9d8f4c0a6430 100644 --- a/polkadot/xcm/docs/Cargo.toml +++ b/polkadot/xcm/docs/Cargo.toml @@ -10,30 +10,30 @@ publish = false [dependencies] # For XCM stuff -pallet-xcm = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } xcm-simulator = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features 
= true } # For building FRAME runtimes -codec = { workspace = true, default-features = true } frame = { features = ["experimental", "runtime"], workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +scale-info = { workspace = true } polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -scale-info = { workspace = true } -sp-io = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } # Some pallets -pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # For building docs -docify = { workspace = true } simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } +docify = { workspace = true } [dev-dependencies] test-log = { workspace = true } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 5d5926ae01e0..b07bdfdca3d1 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -5,8 +5,6 @@ edition.workspace = true license.workspace = true version = "7.0.0" description = "Benchmarks for the XCM pallet" -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-benchmarking = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { 
workspace = true } -log = { workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-io = { workspace = true } +xcm-executor = { workspace = true } +frame-benchmarking = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } -xcm-executor = { workspace = true } +log = { workspace = true, default-features = true } [dev-dependencies] -pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } # temp @@ -64,5 +62,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 4428076aa077..303ff9493f71 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -231,13 +231,6 @@ benchmarks_instance_pallet! { let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) - let (_, _) = T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &dest_location, - FeeReason::ChargeFees, - ); - let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositAsset { @@ -264,13 +257,6 @@ benchmarks_instance_pallet! 
{ let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) - let (_, _) = T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &dest_location, - FeeReason::ChargeFees, - ); - let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositReserveAsset { @@ -295,20 +281,12 @@ benchmarks_instance_pallet! { // Checked account starts at zero assert!(T::CheckedAccount::get().map_or(true, |(c, _)| T::TransactAsset::balance(&c).is_zero())); - let dest_location = T::valid_destination()?; - - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) - let (_, _) = T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &dest_location, - FeeReason::ChargeFees, - ); let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::InitiateTeleport { assets: asset.into(), - dest: dest_location, + dest: T::valid_destination()?, xcm: Xcm::new(), }; let xcm = Xcm(vec![instruction]); @@ -325,15 +303,6 @@ benchmarks_instance_pallet! { let (sender_account, sender_location) = account_and_location::(1); let asset = T::get_asset(); let mut holding = T::worst_case_holding(1); - let dest_location = T::valid_destination()?; - - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) - let (_, _) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &dest_location, - FeeReason::ChargeFees, - ); - let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // Add our asset to the holding. @@ -342,7 +311,7 @@ benchmarks_instance_pallet! 
{ let mut executor = new_executor::(sender_location); executor.set_holding(holding.into()); let instruction = Instruction::>::InitiateTransfer { - destination: dest_location, + destination: T::valid_destination()?, // ReserveDeposit is the most expensive filter. remote_fees: Some(AssetTransferFilter::ReserveDeposit(asset.clone().into())), // It's more expensive if we reanchor the origin. diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 1c62bb5886d8..0d80ef89a1ce 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -13,14 +13,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#![cfg(feature = "runtime-benchmarks")] use super::*; use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use alloc::{vec, vec::Vec}; use codec::Encode; -use frame_benchmarking::v2::*; -use frame_support::{traits::fungible::Inspect, BoundedVec}; +use frame_benchmarking::{benchmarks, BenchmarkError}; +use frame_support::traits::fungible::Inspect; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, @@ -30,21 +29,16 @@ use xcm_executor::{ ExecutorError, FeesMode, }; -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn report_holding() -> Result<(), BenchmarkError> { +benchmarks! 
{ + report_holding { let (sender_account, sender_location) = account_and_location::(1); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // generate holding and add possible required fees @@ -70,33 +64,21 @@ mod benchmarks { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly - respecting - // the limit `MAX_ITEMS_IN_ASSETS`. - assets: Definite( - holding - .into_inner() - .into_iter() - .take(MAX_ITEMS_IN_ASSETS) - .collect::>() - .into(), - ), + // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } : { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); - - Ok(()) } // This benchmark does not use any additional orders or instructions. This should be managed // by the `deep` and `shallow` implementation. 
- #[benchmark] - fn buy_execution() -> Result<(), BenchmarkError> { + buy_execution { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -110,16 +92,13 @@ mod benchmarks { }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } : { + executor.bench_process(xcm)?; + } verify { - Ok(()) } - #[benchmark] - fn pay_fees() -> Result<(), BenchmarkError> { + pay_fees { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -132,57 +111,40 @@ mod benchmarks { let instruction = Instruction::>::PayFees { asset: fee_asset }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - Ok(()) - } + } : { + executor.bench_process(xcm)?; + } verify {} - #[benchmark] - fn asset_claimer() -> Result<(), BenchmarkError> { + set_asset_claimer { let mut executor = new_executor::(Default::default()); let (_, sender_location) = account_and_location::(1); - let instruction = Instruction::SetHints { - hints: BoundedVec::::truncate_from(vec![AssetClaimer { - location: sender_location.clone(), - }]), - }; + let instruction = Instruction::SetAssetClaimer{ location:sender_location.clone() }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.asset_claimer(), Some(sender_location.clone())); - - Ok(()) } - #[benchmark] - fn query_response() -> Result<(), BenchmarkError> { + query_response { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); let max_weight = Weight::MAX; let querier: Option = Some(Here.into()); let instruction = Instruction::QueryResponse { query_id, response, max_weight, querier }; let xcm = Xcm(vec![instruction]); - - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // The assert above is enough to show 
this XCM succeeded - - Ok(()) } // We don't care about the call itself, since that is accounted for in the weight parameter // and included in the final weight calculation. So this is just the overhead of submitting // a noop call. - #[benchmark] - fn transact() -> Result<(), BenchmarkError> { + transact { let (origin, noop_call) = T::transact_origin_and_runtime_call()?; let mut executor = new_executor::(origin); let double_encoded_noop_call: DoubleEncoded<_> = noop_call.encode().into(); @@ -190,148 +152,104 @@ mod benchmarks { let instruction = Instruction::Transact { origin_kind: OriginKind::SovereignAccount, call: double_encoded_noop_call, - fallback_max_weight: None, }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // TODO Make the assertion configurable? - - Ok(()) } - #[benchmark] - fn refund_surplus() -> Result<(), BenchmarkError> { + refund_surplus { let mut executor = new_executor::(Default::default()); let holding_assets = T::worst_case_holding(1); // We can already buy execution since we'll load the holding register manually let asset_for_fees = T::fee_asset().unwrap(); - let previous_xcm = Xcm(vec![BuyExecution { - fees: asset_for_fees, - weight_limit: Limited(Weight::from_parts(1337, 1337)), - }]); + let previous_xcm = Xcm(vec![BuyExecution { fees: asset_for_fees, weight_limit: Limited(Weight::from_parts(1337, 1337)) }]); executor.set_holding(holding_assets.into()); executor.set_total_surplus(Weight::from_parts(1337, 1337)); executor.set_total_refunded(Weight::zero()); - executor - .bench_process(previous_xcm) - .expect("Holding has been loaded, so we can buy execution here"); + executor.bench_process(previous_xcm).expect("Holding has been loaded, so we can buy execution here"); let instruction = Instruction::>::RefundSurplus; let xcm = Xcm(vec![instruction]); - #[block] - { - let _result = executor.bench_process(xcm)?; - } + } : { + let result = 
executor.bench_process(xcm)?; + } verify { assert_eq!(executor.total_surplus(), &Weight::from_parts(1337, 1337)); assert_eq!(executor.total_refunded(), &Weight::from_parts(1337, 1337)); - - Ok(()) } - #[benchmark] - fn set_error_handler() -> Result<(), BenchmarkError> { + set_error_handler { let mut executor = new_executor::(Default::default()); let instruction = Instruction::>::SetErrorHandler(Xcm(vec![])); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } : { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.error_handler(), &Xcm(vec![])); - - Ok(()) } - #[benchmark] - fn set_appendix() -> Result<(), BenchmarkError> { + set_appendix { let mut executor = new_executor::(Default::default()); let appendix = Xcm(vec![]); let instruction = Instruction::>::SetAppendix(appendix); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } : { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.appendix(), &Xcm(vec![])); - Ok(()) } - #[benchmark] - fn clear_error() -> Result<(), BenchmarkError> { + clear_error { let mut executor = new_executor::(Default::default()); executor.set_error(Some((5u32, XcmError::Overflow))); let instruction = Instruction::>::ClearError; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - assert!(executor.error().is_none()); - Ok(()) + } : { + executor.bench_process(xcm)?; + } verify { + assert!(executor.error().is_none()) } - #[benchmark] - fn descend_origin() -> Result<(), BenchmarkError> { + descend_origin { let mut executor = new_executor::(Default::default()); let who = Junctions::from([OnlyChild, OnlyChild]); let instruction = Instruction::DescendOrigin(who.clone()); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: who }),); - - Ok(()) - } - - #[benchmark] - fn execute_with_origin() -> Result<(), 
BenchmarkError> { - let mut executor = new_executor::(Default::default()); - let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]); - let instruction = Instruction::ExecuteWithOrigin { - descendant_origin: Some(who.clone()), - xcm: Xcm(vec![]), - }; - let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: Here }),); - - Ok(()) + } : { + executor.bench_process(xcm)?; + } verify { + assert_eq!( + executor.origin(), + &Some(Location { + parents: 0, + interior: who, + }), + ); } - #[benchmark] - fn clear_origin() -> Result<(), BenchmarkError> { + clear_origin { let mut executor = new_executor::(Default::default()); let instruction = Instruction::ClearOrigin; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } : { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.origin(), &None); - Ok(()) } - #[benchmark] - fn report_error() -> Result<(), BenchmarkError> { + report_error { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let max_weight = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -343,21 +261,18 @@ mod benchmarks { } executor.set_error(Some((0u32, XcmError::Unimplemented))); - let instruction = - Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight }); + let instruction = 
Instruction::ReportError(QueryResponseInfo { + query_id, destination, max_weight + }); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); - - Ok(()) } - #[benchmark] - fn claim_asset() -> Result<(), BenchmarkError> { + claim_asset { use xcm_executor::traits::DropAssets; let (origin, ticket, assets) = T::claimable_asset()?; @@ -366,7 +281,11 @@ mod benchmarks { ::AssetTrap::drop_assets( &origin, assets.clone().into(), - &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, + &XcmContext { + origin: Some(origin.clone()), + message_id: [0; 32], + topic: None, + }, ); // Assets should be in the trap now. @@ -374,32 +293,28 @@ mod benchmarks { let mut executor = new_executor::(origin); let instruction = Instruction::ClaimAsset { assets: assets.clone(), ticket }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + } :{ + executor.bench_process(xcm)?; + } verify { assert!(executor.holding().ensure_contains(&assets).is_ok()); - Ok(()) } - #[benchmark] - fn trap() -> Result<(), BenchmarkError> { + trap { let mut executor = new_executor::(Default::default()); let instruction = Instruction::Trap(10); let xcm = Xcm(vec![instruction]); // In order to access result in the verification below, it needs to be defined here. - let result; - #[block] - { - result = executor.bench_process(xcm); - } - assert!(matches!(result, Err(ExecutorError { xcm_error: XcmError::Trap(10), .. }))); - - Ok(()) + let mut _result = Ok(()); + } : { + _result = executor.bench_process(xcm); + } verify { + assert!(matches!(_result, Err(ExecutorError { + xcm_error: XcmError::Trap(10), + .. 
+ }))); } - #[benchmark] - fn subscribe_version() -> Result<(), BenchmarkError> { + subscribe_version { use xcm_executor::traits::VersionChangeNotifier; let origin = T::subscribe_origin()?; let query_id = Default::default(); @@ -407,55 +322,40 @@ mod benchmarks { let mut executor = new_executor::(origin.clone()); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); - - T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); - - #[block] - { - executor.bench_process(xcm)?; - } - assert!(::SubscriptionService::is_subscribed( - &origin - )); - Ok(()) + } : { + executor.bench_process(xcm)?; + } verify { + assert!(::SubscriptionService::is_subscribed(&origin)); } - #[benchmark] - fn unsubscribe_version() -> Result<(), BenchmarkError> { + unsubscribe_version { use xcm_executor::traits::VersionChangeNotifier; // First we need to subscribe to notifications. let (origin, _) = T::transact_origin_and_runtime_call()?; - - T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); - let query_id = Default::default(); let max_response_weight = Default::default(); ::SubscriptionService::start( &origin, query_id, max_response_weight, - &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, - ) - .map_err(|_| "Could not start subscription")?; - assert!(::SubscriptionService::is_subscribed( - &origin - )); + &XcmContext { + origin: Some(origin.clone()), + message_id: [0; 32], + topic: None, + }, + ).map_err(|_| "Could not start subscription")?; + assert!(::SubscriptionService::is_subscribed(&origin)); let mut executor = new_executor::(origin.clone()); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - assert!(!::SubscriptionService::is_subscribed( - &origin - )); - Ok(()) + } : { + executor.bench_process(xcm)?; + } verify { + 
assert!(!::SubscriptionService::is_subscribed(&origin)); } - #[benchmark] - fn burn_asset() -> Result<(), BenchmarkError> { + burn_asset { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -464,16 +364,13 @@ mod benchmarks { let instruction = Instruction::BurnAsset(assets.into()); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert!(executor.holding().is_empty()); - Ok(()) } - #[benchmark] - fn expect_asset() -> Result<(), BenchmarkError> { + expect_asset { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -482,86 +379,71 @@ mod benchmarks { let instruction = Instruction::ExpectAsset(assets.into()); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // `execute` completing successfully is as good as we can check. - - Ok(()) } - #[benchmark] - fn expect_origin() -> Result<(), BenchmarkError> { + expect_origin { let expected_origin = Parent.into(); let mut executor = new_executor::(Default::default()); let instruction = Instruction::ExpectOrigin(Some(expected_origin)); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - #[block] - { - _result = executor.bench_process(xcm); - } - assert!(matches!( - _result, - Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) - )); - - Ok(()) + }: { + _result = executor.bench_process(xcm); + } verify { + assert!(matches!(_result, Err(ExecutorError { + xcm_error: XcmError::ExpectationFalse, + .. 
+ }))); } - #[benchmark] - fn expect_error() -> Result<(), BenchmarkError> { + expect_error { let mut executor = new_executor::(Default::default()); executor.set_error(Some((3u32, XcmError::Overflow))); let instruction = Instruction::ExpectError(None); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - #[block] - { - _result = executor.bench_process(xcm); - } - assert!(matches!( - _result, - Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) - )); - - Ok(()) + }: { + _result = executor.bench_process(xcm); + } verify { + assert!(matches!(_result, Err(ExecutorError { + xcm_error: XcmError::ExpectationFalse, + .. + }))); } - #[benchmark] - fn expect_transact_status() -> Result<(), BenchmarkError> { + expect_transact_status { let mut executor = new_executor::(Default::default()); - let worst_error = - || -> MaybeErrorCode { vec![0; MaxDispatchErrorLen::get() as usize].into() }; + let worst_error = || -> MaybeErrorCode { + vec![0; MaxDispatchErrorLen::get() as usize].into() + }; executor.set_transact_status(worst_error()); let instruction = Instruction::ExpectTransactStatus(worst_error()); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - #[block] - { - _result = executor.bench_process(xcm); - } + }: { + _result = executor.bench_process(xcm); + } verify { assert!(matches!(_result, Ok(..))); - Ok(()) } - #[benchmark] - fn query_pallet() -> Result<(), BenchmarkError> { + query_pallet { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::QueryPallet, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); 
let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); if let Some(expected_fees_mode) = expected_fees_mode { @@ -577,19 +459,15 @@ mod benchmarks { response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 - - Ok(()) } - #[benchmark] - fn expect_pallet() -> Result<(), BenchmarkError> { + expect_pallet { let mut executor = new_executor::(Default::default()); let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { @@ -600,27 +478,23 @@ mod benchmarks { min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // the execution succeeding is all we need to verify this xcm was successful - Ok(()) } - #[benchmark] - fn report_transact_status() -> Result<(), BenchmarkError> { + report_transact_status { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = 
new_executor::(sender_location); @@ -638,102 +512,84 @@ mod benchmarks { max_weight, }); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 - Ok(()) } - #[benchmark] - fn clear_transact_status() -> Result<(), BenchmarkError> { + clear_transact_status { let mut executor = new_executor::(Default::default()); executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ClearTransactStatus; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.transact_status(), &MaybeErrorCode::Success); - Ok(()) } - #[benchmark] - fn set_topic() -> Result<(), BenchmarkError> { + set_topic { let mut executor = new_executor::(Default::default()); let instruction = Instruction::SetTopic([1; 32]); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.topic(), &Some([1; 32])); - Ok(()) } - #[benchmark] - fn clear_topic() -> Result<(), BenchmarkError> { + clear_topic { let mut executor = new_executor::(Default::default()); executor.set_topic(Some([2; 32])); let instruction = Instruction::ClearTopic; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.topic(), &None); - Ok(()) } - #[benchmark] - fn exchange_asset() -> Result<(), BenchmarkError> { + exchange_asset { let (give, want) = T::worst_case_asset_exchange().map_err(|_| BenchmarkError::Skip)?; let assets = give.clone(); let mut executor = new_executor::(Default::default()); 
executor.set_holding(give.into()); - let instruction = - Instruction::ExchangeAsset { give: assets.into(), want: want.clone(), maximal: true }; + let instruction = Instruction::ExchangeAsset { + give: assets.into(), + want: want.clone(), + maximal: true, + }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.holding(), &want.into()); - Ok(()) } - #[benchmark] - fn universal_origin() -> Result<(), BenchmarkError> { + universal_origin { let (origin, alias) = T::universal_alias().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { use frame_support::traits::Get; let universal_location = ::UniversalLocation::get(); - assert_eq!( - executor.origin(), - &Some(Junctions::from([alias]).relative_to(&universal_location)) - ); - - Ok(()) + assert_eq!(executor.origin(), &Some(Junctions::from([alias]).relative_to(&universal_location))); } - #[benchmark] - fn export_message(x: Linear<1, 1000>) -> Result<(), BenchmarkError> { + export_message { + let x in 1 .. 1000; // The `inner_xcm` influences `ExportMessage` total weight based on // `inner_xcm.encoded_size()`, so for this benchmark use smallest encoded instruction // to approximate weight per "unit" of encoded size; then actual weight can be estimated @@ -743,12 +599,11 @@ mod benchmarks { // Get `origin`, `network` and `destination` from configured runtime. 
let (origin, network, destination) = T::export_message_origin_and_destination()?; - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &origin, - &destination.clone().into(), - FeeReason::Export { network, destination: destination.clone() }, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.clone().into(), + FeeReason::Export { network, destination: destination.clone() }, + ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -759,39 +614,37 @@ mod benchmarks { if let Some(expected_assets_in_holding) = expected_assets_in_holding { executor.set_holding(expected_assets_in_holding.into()); } - let xcm = - Xcm(vec![ExportMessage { network, destination: destination.clone(), xcm: inner_xcm }]); - #[block] - { - executor.bench_process(xcm)?; - } + let xcm = Xcm(vec![ExportMessage { + network, destination: destination.clone(), xcm: inner_xcm, + }]); + }: { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
#4426 - Ok(()) } - #[benchmark] - fn set_fees_mode() -> Result<(), BenchmarkError> { + set_fees_mode { let mut executor = new_executor::(Default::default()); executor.set_fees_mode(FeesMode { jit_withdraw: false }); let instruction = Instruction::SetFeesMode { jit_withdraw: true }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.fees_mode(), &FeesMode { jit_withdraw: true }); - Ok(()) } - #[benchmark] - fn lock_asset() -> Result<(), BenchmarkError> { + lock_asset { let (unlocker, owner, asset) = T::unlockable_asset()?; - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery(&owner, &unlocker, FeeReason::LockAsset); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &owner, + &unlocker, + FeeReason::LockAsset, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -811,18 +664,15 @@ mod benchmarks { let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // Check delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 - Ok(()) } - #[benchmark] - fn unlock_asset() -> Result<(), BenchmarkError> { + unlock_asset { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -842,15 +692,13 @@ mod benchmarks { // ... then unlock them with the UnlockAsset instruction. 
let instruction = Instruction::UnlockAsset { asset, target: owner }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - Ok(()) + }: { + executor.bench_process(xcm)?; + } verify { + } - #[benchmark] - fn note_unlockable() -> Result<(), BenchmarkError> { + note_unlockable { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -870,15 +718,13 @@ mod benchmarks { // ... then note them as unlockable with the NoteUnlockable instruction. let instruction = Instruction::NoteUnlockable { asset, owner }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - Ok(()) + }: { + executor.bench_process(xcm)?; + } verify { + } - #[benchmark] - fn request_unlock() -> Result<(), BenchmarkError> { + request_unlock { use xcm_executor::traits::{AssetLock, Enact}; let (locker, owner, asset) = T::unlockable_asset()?; @@ -893,12 +739,11 @@ mod benchmarks { .enact() .map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = - T::DeliveryHelper::ensure_successful_delivery( - &owner, - &locker, - FeeReason::RequestUnlock, - ); + let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -912,18 +757,15 @@ mod benchmarks { } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
#4426 - Ok(()) } - #[benchmark] - fn unpaid_execution() -> Result<(), BenchmarkError> { + unpaid_execution { let mut executor = new_executor::(Default::default()); executor.set_origin(Some(Here.into())); @@ -933,27 +775,21 @@ mod benchmarks { }; let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } - Ok(()) + }: { + executor.bench_process(xcm)?; } - #[benchmark] - fn alias_origin() -> Result<(), BenchmarkError> { + alias_origin { let (origin, target) = T::alias_origin().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::AliasOrigin(target.clone()); let xcm = Xcm(vec![instruction]); - #[block] - { - executor.bench_process(xcm)?; - } + }: { + executor.bench_process(xcm)?; + } verify { assert_eq!(executor.origin(), &Some(target)); - Ok(()) } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 85beba03b157..4d44d75e34dd 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -5,8 +5,6 @@ description = "A pallet for handling XCM programs." 
authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -25,8 +23,8 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } xcm = { workspace = true } -xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-builder = { workspace = true } xcm-runtime-apis = { workspace = true } # marked optional, used in benchmarking @@ -35,8 +33,8 @@ pallet-balances = { optional = true, workspace = true } [dev-dependencies] pallet-assets = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = ["std"] @@ -70,7 +68,6 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 3ca048057ee4..e493d4838f5c 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use xcm::latest::prelude::*; @@ -83,41 +83,25 @@ pub trait Config: crate::Config { fn get_asset() -> Asset; } -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn send() -> Result<(), BenchmarkError> { +benchmarks! 
{ + send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedLocation = T::reachable_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))? - .into(); + let versioned_dest: VersionedLocation = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? + .into(); let versioned_msg = VersionedXcm::from(msg); + }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) - T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &versioned_dest.clone().try_into().unwrap(), - FeeReason::ChargeFees, - ); - - #[extrinsic_call] - _(send_origin as RuntimeOrigin, Box::new(versioned_dest), Box::new(versioned_msg)); - - Ok(()) - } - - #[benchmark] - fn teleport_assets() -> Result<(), BenchmarkError> { - let (asset, destination) = T::teleportable_asset_and_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + teleport_assets { + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; let assets: Assets = asset.clone().into(); @@ -125,13 +109,11 @@ mod benchmarks { let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) - { + if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return 
Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) let (_, _) = T::DeliveryHelper::ensure_successful_delivery( &origin_location, &destination, @@ -145,23 +127,18 @@ mod benchmarks { &Asset { fun: Fungible(*amount), id: asset.id }, &origin_location, None, - ) - .map_err(|error| { - tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) - })?; - }, - NonFungible(_instance) => { - ::AssetTransactor::deposit_asset( - &asset, - &origin_location, - None, - ) - .map_err(|error| { - tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + ).map_err(|error| { + tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) })?; }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } }; let recipient = [0u8; 32]; @@ -169,23 +146,12 @@ mod benchmarks { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - #[extrinsic_call] - _( - send_origin, - Box::new(versioned_dest), - Box::new(versioned_beneficiary), - Box::new(versioned_assets), - 0, - ); - - Ok(()) - } - - #[benchmark] - fn reserve_transfer_assets() -> Result<(), BenchmarkError> { 
- let (asset, destination) = T::reserve_transferable_asset_and_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + reserve_transfer_assets { + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; let assets: Assets = asset.clone().into(); @@ -193,15 +159,11 @@ mod benchmarks { let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&( - origin_location.clone(), - assets.clone().into_inner(), - )) { + if !T::XcmReserveTransferFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) 
let (_, _) = T::DeliveryHelper::ensure_successful_delivery( &origin_location, &destination, @@ -215,23 +177,18 @@ mod benchmarks { &Asset { fun: Fungible(*amount), id: asset.id.clone() }, &origin_location, None, - ) - .map_err(|error| { - tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) - })?; - }, - NonFungible(_instance) => { - ::AssetTransactor::deposit_asset( - &asset, - &origin_location, - None, - ) - .map_err(|error| { - tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + ).map_err(|error| { + tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) })?; }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } }; let recipient = [0u8; 32]; @@ -239,16 +196,8 @@ mod benchmarks { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); - - #[extrinsic_call] - _( - send_origin, - Box::new(versioned_dest), - Box::new(versioned_beneficiary), - Box::new(versioned_assets), - 0, - ); - + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { match &asset.fun { Fungible(amount) => { assert_ok!(::AssetTransactor::withdraw_asset( @@ -257,22 +206,20 @@ mod benchmarks { None, )); }, - NonFungible(_instance) => { + NonFungible(instance) => { assert_ok!(::AssetTransactor::withdraw_asset( &asset, &destination, None, )); - }, + } }; - - Ok(()) } - #[benchmark] - fn transfer_assets() -> Result<(), 
BenchmarkError> { - let (assets, _fee_index, destination, verify_fn) = T::set_up_complex_asset_transfer() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + transfer_assets { + let (assets, fee_index, destination, verify) = T::set_up_complex_asset_transfer().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let recipient = [0u8; 32]; @@ -280,32 +227,13 @@ mod benchmarks { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); - - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) - T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &versioned_dest.clone().try_into().unwrap(), - FeeReason::ChargeFees, - ); - - #[extrinsic_call] - _( - send_origin, - Box::new(versioned_dest), - Box::new(versioned_beneficiary), - Box::new(versioned_assets), - 0, - WeightLimit::Unlimited, - ); - + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0, WeightLimit::Unlimited) + verify { // run provided verification function - verify_fn(); - Ok(()) + verify(); } - #[benchmark] - fn execute() -> Result<(), BenchmarkError> { + execute { let execute_origin = T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) @@ -315,287 +243,147 @@ mod benchmarks { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let versioned_msg = VersionedXcm::from(msg); + }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) - #[extrinsic_call] - _(execute_origin as RuntimeOrigin, Box::new(versioned_msg), Weight::MAX); - - Ok(()) - } - - #[benchmark] - fn 
force_xcm_version() -> Result<(), BenchmarkError> { - let loc = T::reachable_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + force_xcm_version { + let loc = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; let xcm_version = 2; + }: _(RawOrigin::Root, Box::new(loc), xcm_version) - #[extrinsic_call] - _(RawOrigin::Root, Box::new(loc), xcm_version); - - Ok(()) - } - - #[benchmark] - fn force_default_xcm_version() { - #[extrinsic_call] - _(RawOrigin::Root, Some(2)) - } - - #[benchmark] - fn force_subscribe_version_notify() -> Result<(), BenchmarkError> { - let versioned_loc: VersionedLocation = T::reachable_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))? - .into(); - - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) - T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &versioned_loc.clone().try_into().unwrap(), - FeeReason::ChargeFees, - ); - - #[extrinsic_call] - _(RawOrigin::Root, Box::new(versioned_loc)); + force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) - Ok(()) - } + force_subscribe_version_notify { + let versioned_loc: VersionedLocation = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? + .into(); + }: _(RawOrigin::Root, Box::new(versioned_loc)) - #[benchmark] - fn force_unsubscribe_version_notify() -> Result<(), BenchmarkError> { - let loc = T::reachable_dest() - .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + force_unsubscribe_version_notify { + let loc = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; let versioned_loc: VersionedLocation = loc.clone().into(); - - // Ensure that origin can send to destination - // (e.g. setup delivery fees, ensure router setup, ...) 
- T::DeliveryHelper::ensure_successful_delivery( - &Default::default(), - &versioned_loc.clone().try_into().unwrap(), - FeeReason::ChargeFees, - ); - let _ = crate::Pallet::::request_version_notify(loc); + }: _(RawOrigin::Root, Box::new(versioned_loc)) - #[extrinsic_call] - _(RawOrigin::Root, Box::new(versioned_loc)); - - Ok(()) - } + force_suspension {}: _(RawOrigin::Root, true) - #[benchmark] - fn force_suspension() { - #[extrinsic_call] - _(RawOrigin::Root, true) - } - - #[benchmark] - fn migrate_supported_version() { + migrate_supported_version { let old_version = XCM_VERSION - 1; let loc = VersionedLocation::from(Location::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::MigrateSupportedVersion, - Weight::zero(), - ); - } + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } - #[benchmark] - fn migrate_version_notifiers() { + migrate_version_notifiers { let old_version = XCM_VERSION - 1; let loc = VersionedLocation::from(Location::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::MigrateVersionNotifiers, - Weight::zero(), - ); - } + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } - #[benchmark] - fn already_notified_target() -> Result<(), BenchmarkError> { - let loc = T::reachable_dest().ok_or(BenchmarkError::Override( - BenchmarkResult::from_weight(T::DbWeight::get().reads(1)), - ))?; + already_notified_target { + let loc = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), + )?; let loc = VersionedLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert( - current_version, - loc, - (0, Weight::zero(), 
current_version), - ); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::NotifyCurrentTargets(None), - Weight::zero(), - ); - } - - Ok(()) + VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } - #[benchmark] - fn notify_current_targets() -> Result<(), BenchmarkError> { - let loc = T::reachable_dest().ok_or(BenchmarkError::Override( - BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3)), - ))?; + notify_current_targets { + let loc = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), + )?; let loc = VersionedLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::NotifyCurrentTargets(None), - Weight::zero(), - ); - } - - Ok(()) + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } - #[benchmark] - fn notify_target_migration_fail() { + notify_target_migration_fail { let newer_xcm_version = xcm::prelude::XCM_VERSION; let older_xcm_version = newer_xcm_version - 1; - let bad_location: Location = Plurality { id: BodyId::Unit, part: BodyPart::Voice }.into(); + let bad_location: Location = Plurality { + id: BodyId::Unit, + part: BodyPart::Voice, + }.into(); let bad_location = VersionedLocation::from(bad_location) .into_version(older_xcm_version) .expect("Version convertion should work"); let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert( - current_version, - bad_location, - (0, Weight::zero(), current_version), - ); - - #[block] - { - 
crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::MigrateAndNotifyOldTargets, - Weight::zero(), - ); - } + VersionNotifyTargets::::insert(current_version, bad_location, (0, Weight::zero(), current_version)); + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } - #[benchmark] - fn migrate_version_notify_targets() { + migrate_version_notify_targets { let current_version = T::AdvertisedXcmVersion::get(); let old_version = current_version - 1; let loc = VersionedLocation::from(Location::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::MigrateAndNotifyOldTargets, - Weight::zero(), - ); - } + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } - #[benchmark] - fn migrate_and_notify_old_targets() -> Result<(), BenchmarkError> { - let loc = T::reachable_dest().ok_or(BenchmarkError::Override( - BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3)), - ))?; + migrate_and_notify_old_targets { + let loc = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), + )?; let loc = VersionedLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); - - #[block] - { - crate::Pallet::::check_xcm_version_change( - VersionMigrationStage::MigrateAndNotifyOldTargets, - Weight::zero(), - ); - } - - Ok(()) + }: { + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } - #[benchmark] - fn new_query() { + new_query { let responder = Location::from(Parent); let timeout = 1u32.into(); let match_querier = Location::from(Here); - - #[block] - { - 
crate::Pallet::::new_query(responder, timeout, match_querier); - } + }: { + crate::Pallet::::new_query(responder, timeout, match_querier); } - #[benchmark] - fn take_response() { + take_response { let responder = Location::from(Parent); let timeout = 1u32.into(); let match_querier = Location::from(Here); let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); - let infos = (0..xcm::v3::MaxPalletsInfo::get()) - .map(|_| { - PalletInfo::new( - u32::MAX, - (0..xcm::v3::MaxPalletNameLen::get()) - .map(|_| 97u8) - .collect::>() - .try_into() - .unwrap(), - (0..xcm::v3::MaxPalletNameLen::get()) - .map(|_| 97u8) - .collect::>() - .try_into() - .unwrap(), - u32::MAX, - u32::MAX, - u32::MAX, - ) - .unwrap() - }) - .collect::>(); - crate::Pallet::::expect_response( - query_id, - Response::PalletsInfo(infos.try_into().unwrap()), - ); - - #[block] - { - as QueryHandler>::take_response(query_id); - } + let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( + u32::MAX, + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + u32::MAX, + u32::MAX, + u32::MAX, + ).unwrap()).collect::>(); + crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + }: { + as QueryHandler>::take_response(query_id); } - #[benchmark] - fn claim_assets() -> Result<(), BenchmarkError> { + claim_assets { let claim_origin = RawOrigin::Signed(whitelisted_caller()); - let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()) - .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()).map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let asset: Asset = T::get_asset(); // Trap assets for claiming later crate::Pallet::::drop_assets( &claim_location, 
asset.clone().into(), - &XcmContext { origin: None, message_id: [0u8; 32], topic: None }, + &XcmContext { origin: None, message_id: [0u8; 32], topic: None } ); let versioned_assets = VersionedAssets::from(Assets::from(asset)); - - #[extrinsic_call] - _( - claim_origin, - Box::new(versioned_assets), - Box::new(VersionedLocation::from(claim_location)), - ); - - Ok(()) - } + }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::from(claim_location))) impl_benchmark_test_suite!( Pallet, diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 6360298b21c3..4a97546b38d1 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -75,7 +75,6 @@ use xcm_runtime_apis::{ #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; -use xcm_executor::traits::{FeeManager, FeeReason}; pub trait WeightInfo { fn send() -> Weight; @@ -241,7 +240,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(Location, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers + FeeManager; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. 
type XcmTeleportFilter: Contains<(Location, Vec)>; @@ -363,10 +362,7 @@ pub mod pallet { let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(|error| { - tracing::error!(target: "xcm::pallet_xcm::send", ?error, ?dest, ?message, "XCM send failed with error"); - Error::::from(error) - })?; + .map_err(Error::::from)?; let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; Self::deposit_event(e); Ok(message_id) @@ -1803,10 +1799,7 @@ impl Pallet { if let Some(remote_xcm) = remote_xcm { let (ticket, price) = validate_send::(dest.clone(), remote_xcm.clone()) - .map_err(|error| { - tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM validate_send failed with error"); - Error::::from(error) - })?; + .map_err(Error::::from)?; if origin != Here.into_location() { Self::charge_fees(origin.clone(), price.clone()).map_err(|error| { tracing::error!( @@ -1816,11 +1809,7 @@ impl Pallet { Error::::FeesNotMet })?; } - let message_id = T::XcmRouter::deliver(ticket) - .map_err(|error| { - tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM deliver failed with error"); - Error::::from(error) - })?; + let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; Self::deposit_event(e); @@ -2479,17 +2468,17 @@ impl Pallet { mut message: Xcm<()>, ) -> Result { let interior = interior.into(); - let local_origin = interior.clone().into(); let dest = dest.into(); - let is_waived = - ::is_waived(Some(&local_origin), FeeReason::ChargeFees); - if interior != Junctions::Here { + let maybe_fee_payer = if interior != Junctions::Here { message.0.insert(0, DescendOrigin(interior.clone())); - } + Some(interior.into()) + } else { + None + }; tracing::debug!(target: 
"xcm::send_xcm", "{:?}, {:?}", dest.clone(), message.clone()); let (ticket, price) = validate_send::(dest, message)?; - if !is_waived { - Self::charge_fees(local_origin, price).map_err(|e| { + if let Some(fee_payer) = maybe_fee_payer { + Self::charge_fees(fee_payer, price).map_err(|e| { tracing::error!( target: "xcm::pallet_xcm::send_xcm", ?e, diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 0843da86f038..83b35d19cf7e 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -6,8 +6,6 @@ edition.workspace = true license.workspace = true version = "7.0.0" publish = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true @@ -16,15 +14,13 @@ workspace = true proc-macro = true [dependencies] -Inflector = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } +Inflector = { workspace = true } [dev-dependencies] trybuild = { features = ["diff"], workspace = true } # NOTE: we have to explicitly specify `std` because of trybuild # https://github.com/paritytech/polkadot-sdk/pull/5167 xcm = { workspace = true, default-features = true, features = ["std"] } -# For testing macros. 
-frame-support = { workspace = true } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 34b89f13422c..b65290332af9 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -20,8 +20,8 @@ use inflector::Inflector; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ - Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, GenericArgument, Ident, Lit, Meta, - MetaNameValue, PathArguments, Result, Type, TypePath, Variant, + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, + Result, Variant, }; pub fn derive(input: DeriveInput) -> Result { @@ -29,7 +29,7 @@ pub fn derive(input: DeriveInput) -> Result { Data::Enum(data_enum) => data_enum, _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), }; - let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum)?; + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); let builder_impl = generate_builder_impl(&input.ident, data_enum)?; let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; let output = quote! { @@ -83,12 +83,54 @@ pub fn derive(input: DeriveInput) -> Result { Ok(output) } -fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> Result { - let methods = data_enum - .variants - .iter() - .map(|variant| convert_variant_to_method(name, variant, None)) - .collect::>>()?; +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { + let methods = data_enum.variants.iter().map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => { + quote! 
{ + pub fn #method_name(mut self) -> Self { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); + self + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + self.instructions.push(#name::::#variant_name { #(#arg_names),* }); + self + } + } + }, + }; + quote! { + #(#docs)* + #method + } + }); let output = quote! { impl XcmBuilder { #(#methods)* @@ -98,7 +140,7 @@ fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> Result Result { @@ -123,17 +165,11 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result>>()?; @@ -142,14 +178,57 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result }), - )?; + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! 
{ + #(#docs)* + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + variant, + "Instructions that load the holding register should take operands", + )), + }; Ok(method) }) - .collect::>>()?; + .collect::, _>>()?; let first_impl = quote! { impl XcmBuilder { @@ -161,12 +240,27 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum .variants .iter() - .filter(|variant| variant.ident == "ClearOrigin" || variant.ident == "SetHints") + .filter(|variant| variant.ident == "ClearOrigin") .map(|variant| { - let method = convert_variant_to_method(name, variant, None)?; + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => { + quote! 
{ + #(#docs)* + pub fn #method_name(mut self) -> XcmBuilder { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), + }; Ok(method) }) - .collect::>>()?; + .collect::, _>>()?; // Then we require fees to be paid let pay_fees_variants = data_enum @@ -201,12 +295,36 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result }), - )?; - Ok(method) + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let fields = match &variant.fields { + Fields::Named(fields) => { + let arg_names: Vec<_> = + fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = + fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + variant, + "Both BuyExecution and PayFees have named fields", + )), + }; + Ok(fields) }) .collect::>>()?; @@ -231,156 +349,35 @@ fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result }), - )?; + let unpaid_execution_ident = &unpaid_execution_variant.ident; + let unpaid_execution_method_name = Ident::new( + &unpaid_execution_ident.to_string().to_snake_case(), + unpaid_execution_ident.span(), + ); + let docs = get_doc_comments(unpaid_execution_variant); + let fields = match &unpaid_execution_variant.fields { + Fields::Named(fields) => fields, + _ => + return Err(Error::new_spanned( + unpaid_execution_variant, + "UnpaidExecution should 
have named fields", + )), + }; + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); Ok(quote! { impl XcmBuilder { - #method - } - }) -} - -// Have to call with `XcmBuilder` in allowed_after_load_holding_methods. -fn convert_variant_to_method( - name: &Ident, - variant: &Variant, - maybe_return_type: Option, -) -> Result { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let method = match &variant.fields { - Fields::Unit => - if let Some(return_type) = maybe_return_type { - quote! { - pub fn #method_name(self) -> #return_type { - let mut new_instructions = self.instructions; - new_instructions.push(#name::::#variant_name); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - } else { - quote! { - pub fn #method_name(mut self) -> Self { - self.instructions.push(#name::::#variant_name); - self - } - } - }, - Fields::Unnamed(fields) => { - let arg_names: Vec<_> = fields - .unnamed - .iter() - .enumerate() - .map(|(index, _)| format_ident!("arg{}", index)) - .collect(); - let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); - if let Some(return_type) = maybe_return_type { - quote! { - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> #return_type { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name(#(#arg_names),*)); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - } else { - quote! 
{ - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - self.instructions.push(#name::::#variant_name(#(#arg_names),*)); - self - } - } - } - }, - Fields::Named(fields) => { - let normal_fields: Vec<_> = fields - .named - .iter() - .filter(|field| { - if let Type::Path(TypePath { path, .. }) = &field.ty { - for segment in &path.segments { - if segment.ident == format_ident!("BoundedVec") { - return false; - } - } - true - } else { - true - } - }) - .collect(); - let bounded_fields: Vec<_> = fields - .named - .iter() - .filter(|field| { - if let Type::Path(TypePath { path, .. }) = &field.ty { - for segment in &path.segments { - if segment.ident == format_ident!("BoundedVec") { - return true; - } - } - false - } else { - false - } - }) - .collect(); - let arg_names: Vec<_> = normal_fields.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = normal_fields.iter().map(|field| &field.ty).collect(); - let bounded_names: Vec<_> = bounded_fields.iter().map(|field| &field.ident).collect(); - let bounded_types = bounded_fields - .iter() - .map(|field| extract_generic_argument(&field.ty, 0, "BoundedVec's inner type")) - .collect::>>()?; - let bounded_sizes = bounded_fields - .iter() - .map(|field| extract_generic_argument(&field.ty, 1, "BoundedVec's size")) - .collect::>>()?; - let comma_in_the_middle = if normal_fields.is_empty() { - quote! {} - } else { - quote! {,} - }; - if let Some(return_type) = maybe_return_type { - quote! 
{ - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> #return_type { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - } else { - quote! { - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* - self.instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); - self - } + #(#docs)* + pub fn #unpaid_execution_method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, } } - }, - }; - Ok(quote! { - #(#docs)* - #method + } }) } @@ -398,40 +395,3 @@ fn get_doc_comments(variant: &Variant) -> Vec { .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) .collect() } - -fn extract_generic_argument<'a>( - field_ty: &'a Type, - index: usize, - expected_msg: &str, -) -> Result<&'a Ident> { - if let Type::Path(type_path) = field_ty { - if let Some(segment) = type_path.path.segments.last() { - if let PathArguments::AngleBracketed(angle_brackets) = &segment.arguments { - let args: Vec<_> = angle_brackets.args.iter().collect(); - if let Some(GenericArgument::Type(Type::Path(TypePath { path, .. 
}))) = - args.get(index) - { - return path.get_ident().ok_or_else(|| { - Error::new_spanned( - path, - format!("Expected an identifier for {}", expected_msg), - ) - }); - } - return Err(Error::new_spanned( - angle_brackets, - format!("Expected a generic argument at index {} for {}", index, expected_msg), - )); - } - return Err(Error::new_spanned( - &segment.arguments, - format!("Expected angle-bracketed arguments for {}", expected_msg), - )); - } - return Err(Error::new_spanned( - &type_path.path, - format!("Expected at least one path segment for {}", expected_msg), - )); - } - Err(Error::new_spanned(field_ty, format!("Expected a path type for {}", expected_msg))) -} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 0dd270286f69..9971fdceb69a 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -20,7 +20,6 @@ use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; -mod enum_variants; mod v3; mod v4; mod v5; @@ -87,11 +86,3 @@ pub fn derive_builder(input: TokenStream) -> TokenStream { .unwrap_or_else(syn::Error::into_compile_error) .into() } - -#[proc_macro_derive(NumVariants)] -pub fn derive_num_variants(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - enum_variants::derive(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 3915621916d4..4202309bf3f7 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -17,7 +17,6 @@ //! Test the methods generated by the Builder derive macro. //! Tests directly on the actual Xcm struct and Instruction enum. 
-use frame_support::BoundedVec; use xcm::latest::prelude::*; #[test] @@ -101,61 +100,3 @@ fn default_builder_allows_clear_origin_before_buy_execution() { ]) ); } - -#[test] -fn bounded_vecs_use_vecs_and_truncate_them() { - let claimer = Location::parent(); - // We can use a vec instead of a bounded vec for specifying hints. - let xcm: Xcm<()> = Xcm::builder_unsafe() - .set_hints(vec![AssetClaimer { location: claimer.clone() }]) - .build(); - assert_eq!( - xcm, - Xcm(vec![SetHints { - hints: BoundedVec::::truncate_from(vec![AssetClaimer { - location: Location::parent() - },]), - },]) - ); - - // If we include more than the limit they'll get truncated. - let xcm: Xcm<()> = Xcm::builder_unsafe() - .set_hints(vec![ - AssetClaimer { location: claimer.clone() }, - AssetClaimer { location: Location::here() }, - ]) - .build(); - assert_eq!( - xcm, - Xcm(vec![SetHints { - hints: BoundedVec::::truncate_from(vec![AssetClaimer { - location: Location::parent() - },]), - },]) - ); - - let xcm: Xcm<()> = Xcm::builder() - .withdraw_asset((Here, 100u128)) - .set_hints(vec![AssetClaimer { location: claimer }]) - .clear_origin() - .pay_fees((Here, 10u128)) - .deposit_asset(All, [0u8; 32]) - .build(); - assert_eq!( - xcm, - Xcm(vec![ - WithdrawAsset(Asset { id: AssetId(Location::here()), fun: Fungible(100) }.into()), - SetHints { - hints: BoundedVec::::truncate_from(vec![AssetClaimer { - location: Location::parent() - }]) - }, - ClearOrigin, - PayFees { asset: Asset { id: AssetId(Location::here()), fun: Fungible(10) } }, - DepositAsset { - assets: All.into(), - beneficiary: AccountId32 { id: [0u8; 32], network: None }.into() - }, - ]) - ); -} diff --git a/polkadot/xcm/procedural/src/enum_variants.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs similarity index 51% rename from polkadot/xcm/procedural/src/enum_variants.rs rename to polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs index f9f2d9e15675..070f0be6bacc 100644 --- 
a/polkadot/xcm/procedural/src/enum_variants.rs +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs @@ -14,25 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Simple derive macro for getting the number of variants in an enum. - -use proc_macro2::TokenStream as TokenStream2; -use quote::{format_ident, quote}; -use syn::{Data, DeriveInput, Error, Result}; - -pub fn derive(input: DeriveInput) -> Result { - let data_enum = match &input.data { - Data::Enum(data_enum) => data_enum, - _ => return Err(Error::new_spanned(&input, "Expected an enum.")), - }; - let ident = format_ident!("{}NumVariants", input.ident); - let number_of_variants: usize = data_enum.variants.iter().count(); - Ok(quote! { - pub struct #ident; - impl ::frame_support::traits::Get for #ident { - fn get() -> u32 { - #number_of_variants as u32 - } - } - }) +//! Test error when an instruction that loads the holding register doesn't take operands. 
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(loads_holding)] + WithdrawAsset, + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, } + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr new file mode 100644 index 000000000000..0358a35ad3dd --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr @@ -0,0 +1,6 @@ +error: Instructions that load the holding register should take operands + --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 + | +25 | / #[builder(loads_holding)] +26 | | WithdrawAsset, + | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr deleted file mode 100644 index c4d711e0d455..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Expected `builder(loads_holding)` or `builder(pays_fees)` - --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 - | -25 | #[builder(funds_holding)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/enum_variants.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs similarity index 70% rename from polkadot/xcm/procedural/tests/enum_variants.rs rename to polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs index 4a5362c1579a..bb98d603fd91 100644 --- a/polkadot/xcm/procedural/tests/enum_variants.rs +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs @@ -14,20 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-//! Test the struct generated by the `NumVariants` derive macro. +//! Test error when the `BuyExecution` instruction doesn't take named fields. -use frame_support::traits::Get; -use xcm_procedural::NumVariants; +use xcm_procedural::Builder; -#[allow(dead_code)] -#[derive(NumVariants)] -enum SomeEnum { - Variant1, - Variant2, - Variant3, -} +struct Xcm(pub Vec>); -#[test] -fn num_variants_works() { - assert_eq!(SomeEnumNumVariants::get(), 3); +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + UnpaidExecution(u32, u32), + Transact { call: Call }, } + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr new file mode 100644 index 000000000000..0a3c0a40a33b --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: UnpaidExecution should have named fields + --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 + | +26 | UnpaidExecution(u32, u32), + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index cbf85b454cc6..1c8620708922 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -547,13 +547,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as as mutable references into `T::send_xcm`. pub fn validate_send(dest: MultiLocation, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. 
+/// both in `Some` before passing them as as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index a0ce551b7608..545b75a99ff3 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1314,22 +1314,8 @@ impl TryFrom> for Instructi HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, mut call, fallback_max_weight } => { - // We first try to decode the call, if we can't, we use the fallback weight, - // if there's no fallback, we just return `Weight::MAX`. - let require_weight_at_most = match call.take_decoded() { - Ok(decoded) => decoded.get_dispatch_info().call_weight, - Err(error) => { - let fallback_weight = fallback_max_weight.unwrap_or(Weight::MAX); - log::debug!( - target: "xcm::versions::v5Tov4", - "Couldn't decode call in Transact: {:?}, using fallback weight: {:?}", - error, - fallback_weight, - ); - fallback_weight - }, - }; + Transact { origin_kind, mut call } => { + let require_weight_at_most = call.take_decoded()?.get_dispatch_info().call_weight; Self::Transact { origin_kind, require_weight_at_most, call: call.into() } }, ReportError(response_info) => Self::ReportError(QueryResponseInfo { @@ -1435,11 +1421,8 @@ impl TryFrom> for Instructi weight_limit, check_origin: check_origin.map(|origin| origin.try_into()).transpose()?, }, - InitiateTransfer { .. } | - PayFees { .. } | - SetHints { .. } | - ExecuteWithOrigin { .. } => { - log::debug!(target: "xcm::versions::v5tov4", "`{new_instruction:?}` not supported by v4"); + InitiateTransfer { .. } | PayFees { .. } | SetAssetClaimer { .. 
} => { + log::debug!(target: "xcm::v5tov4", "`{new_instruction:?}` not supported by v4"); return Err(()); }, }) diff --git a/polkadot/xcm/src/v4/traits.rs b/polkadot/xcm/src/v4/traits.rs index 178093d27177..f32b26fb163d 100644 --- a/polkadot/xcm/src/v4/traits.rs +++ b/polkadot/xcm/src/v4/traits.rs @@ -289,13 +289,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v5/junction.rs b/polkadot/xcm/src/v5/junction.rs index d86a762fcf44..952b61cd9ffe 100644 --- a/polkadot/xcm/src/v5/junction.rs +++ b/polkadot/xcm/src/v5/junction.rs @@ -143,20 +143,16 @@ pub enum NetworkId { /// The Kusama canary-net Relay-chain. Kusama, /// An Ethereum network specified by its chain ID. - #[codec(index = 7)] Ethereum { /// The EIP-155 chain ID. #[codec(compact)] chain_id: u64, }, /// The Bitcoin network, including hard-forks supported by Bitcoin Core development team. - #[codec(index = 8)] BitcoinCore, /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. - #[codec(index = 9)] BitcoinCash, /// The Polkadot Bulletin chain. 
- #[codec(index = 10)] PolkadotBulletin, } diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 21845d07529e..d455fa48adae 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -196,8 +196,6 @@ pub mod prelude { AssetInstance::{self, *}, Assets, BodyId, BodyPart, Error as XcmError, ExecuteXcm, Fungibility::{self, *}, - Hint::{self, *}, - HintNumVariants, Instruction::*, InteriorLocation, Junction::{self, *}, @@ -495,21 +493,13 @@ pub enum Instruction { /// /// - `origin_kind`: The means of expressing the message origin as a dispatch origin. /// - `call`: The encoded transaction to be applied. - /// - `fallback_max_weight`: Used for compatibility with previous versions. Corresponds to the - /// `require_weight_at_most` parameter in previous versions. If you don't care about - /// compatibility you can just put `None`. WARNING: If you do, your XCM might not work with - /// older versions. Make sure to dry-run and validate. /// /// Safety: No concerns. /// /// Kind: *Command*. /// /// Errors: - Transact { - origin_kind: OriginKind, - fallback_max_weight: Option, - call: DoubleEncoded, - }, + Transact { origin_kind: OriginKind, call: DoubleEncoded }, /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by /// the relay-chain to a para. @@ -749,6 +739,15 @@ pub enum Instruction { /// Errors: None. ClearError, + /// Set asset claimer for all the trapped assets during the execution. + /// + /// - `location`: The claimer of any assets potentially trapped during the execution of current + /// XCM. It can be an arbitrary location, not necessarily the caller or origin. + /// + /// Kind: *Command* + /// + /// Errors: None. + SetAssetClaimer { location: Location }, /// Create some assets which are being held on behalf of the origin. /// /// - `assets`: The assets which are to be claimed. 
This must match exactly with the assets @@ -1110,44 +1109,6 @@ pub enum Instruction { assets: Vec, remote_xcm: Xcm<()>, }, - - /// Executes inner `xcm` with origin set to the provided `descendant_origin`. Once the inner - /// `xcm` is executed, the original origin (the one active for this instruction) is restored. - /// - /// Parameters: - /// - `descendant_origin`: The origin that will be used during the execution of the inner - /// `xcm`. If set to `None`, the inner `xcm` is executed with no origin. If set to `Some(o)`, - /// the inner `xcm` is executed as if there was a `DescendOrigin(o)` executed before it, and - /// runs the inner xcm with origin: `original_origin.append_with(o)`. - /// - `xcm`: Inner instructions that will be executed with the origin modified according to - /// `descendant_origin`. - /// - /// Safety: No concerns. - /// - /// Kind: *Command* - /// - /// Errors: - /// - `BadOrigin` - ExecuteWithOrigin { descendant_origin: Option, xcm: Xcm }, - - /// Set hints for XCM execution. - /// - /// These hints change the behaviour of the XCM program they are present in. - /// - /// Parameters: - /// - /// - `hints`: A bounded vector of `ExecutionHint`, specifying the different hints that will - /// be activated. - SetHints { hints: BoundedVec }, -} - -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Eq, Clone, xcm_procedural::NumVariants)] -pub enum Hint { - /// Set asset claimer for all the trapped assets during the execution. - /// - /// - `location`: The claimer of any assets potentially trapped during the execution of current - /// XCM. It can be an arbitrary location, not necessarily the caller or origin. 
- AssetClaimer { location: Location }, } impl Xcm { @@ -1179,8 +1140,7 @@ impl Instruction { HrmpChannelAccepted { recipient } => HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, call, fallback_max_weight } => - Transact { origin_kind, call: call.into(), fallback_max_weight }, + Transact { origin_kind, call } => Transact { origin_kind, call: call.into() }, ReportError(response_info) => ReportError(response_info), DepositAsset { assets, beneficiary } => DepositAsset { assets, beneficiary }, DepositReserveAsset { assets, dest, xcm } => DepositReserveAsset { assets, dest, xcm }, @@ -1196,7 +1156,7 @@ impl Instruction { SetErrorHandler(xcm) => SetErrorHandler(xcm.into()), SetAppendix(xcm) => SetAppendix(xcm.into()), ClearError => ClearError, - SetHints { hints } => SetHints { hints }, + SetAssetClaimer { location } => SetAssetClaimer { location }, ClaimAsset { assets, ticket } => ClaimAsset { assets, ticket }, Trap(code) => Trap(code), SubscribeVersion { query_id, max_response_weight } => @@ -1229,8 +1189,6 @@ impl Instruction { PayFees { asset } => PayFees { asset }, InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm } => InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm }, - ExecuteWithOrigin { descendant_origin, xcm } => - ExecuteWithOrigin { descendant_origin, xcm: xcm.into() }, } } } @@ -1248,8 +1206,7 @@ impl> GetWeight for Instruction { TransferAsset { assets, beneficiary } => W::transfer_asset(assets, beneficiary), TransferReserveAsset { assets, dest, xcm } => W::transfer_reserve_asset(&assets, dest, xcm), - Transact { origin_kind, fallback_max_weight, call } => - W::transact(origin_kind, fallback_max_weight, call), + Transact { origin_kind, call } => W::transact(origin_kind, call), HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => 
W::hrmp_new_channel_open_request(sender, max_message_size, max_capacity), HrmpChannelAccepted { recipient } => W::hrmp_channel_accepted(recipient), @@ -1271,7 +1228,7 @@ impl> GetWeight for Instruction { SetErrorHandler(xcm) => W::set_error_handler(xcm), SetAppendix(xcm) => W::set_appendix(xcm), ClearError => W::clear_error(), - SetHints { hints } => W::set_hints(hints), + SetAssetClaimer { location } => W::set_asset_claimer(location), ClaimAsset { assets, ticket } => W::claim_asset(assets, ticket), Trap(code) => W::trap(code), SubscribeVersion { query_id, max_response_weight } => @@ -1304,8 +1261,6 @@ impl> GetWeight for Instruction { PayFees { asset } => W::pay_fees(asset), InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm } => W::initiate_transfer(destination, remote_fees, preserve_origin, assets, remote_xcm), - ExecuteWithOrigin { descendant_origin, xcm } => - W::execute_with_origin(descendant_origin, xcm), } } } @@ -1365,11 +1320,8 @@ impl TryFrom> for Instruction { HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, require_weight_at_most, call } => Self::Transact { - origin_kind, - call: call.into(), - fallback_max_weight: Some(require_weight_at_most), - }, + Transact { origin_kind, require_weight_at_most: _, call } => + Self::Transact { origin_kind, call: call.into() }, ReportError(response_info) => Self::ReportError(QueryResponseInfo { query_id: response_info.query_id, destination: response_info.destination.try_into().map_err(|_| ())?, @@ -1602,59 +1554,6 @@ mod tests { assert_eq!(new_xcm, xcm); } - #[test] - fn transact_roundtrip_works() { - // We can convert as long as there's a fallback. 
- let xcm = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - Transact { - origin_kind: OriginKind::SovereignAccount, - call: vec![200, 200, 200].into(), - fallback_max_weight: Some(Weight::from_parts(1_000_000, 1_024)), - }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1u128).into()), - OldInstruction::Transact { - origin_kind: OriginKind::SovereignAccount, - call: vec![200, 200, 200].into(), - require_weight_at_most: Weight::from_parts(1_000_000, 1_024), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - - // If we have no fallback the resulting message won't know the weight. - let xcm_without_fallback = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - Transact { - origin_kind: OriginKind::SovereignAccount, - call: vec![200, 200, 200].into(), - fallback_max_weight: None, - }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1u128).into()), - OldInstruction::Transact { - origin_kind: OriginKind::SovereignAccount, - call: vec![200, 200, 200].into(), - require_weight_at_most: Weight::MAX, - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm_without_fallback.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - let xcm_with_max_weight_fallback = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - Transact { - origin_kind: OriginKind::SovereignAccount, - call: vec![200, 200, 200].into(), - fallback_max_weight: Some(Weight::MAX), - }, - ]); - assert_eq!(new_xcm, xcm_with_max_weight_fallback); - } - #[test] fn decoding_respects_limit() { let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs index 79d328561428..1f5041ca8d84 100644 --- a/polkadot/xcm/src/v5/traits.rs +++ b/polkadot/xcm/src/v5/traits.rs @@ -428,7 +428,6 @@ pub type SendResult 
= result::Result<(T, Assets), SendError>; /// let message = Xcm(vec![Instruction::Transact { /// origin_kind: OriginKind::Superuser, /// call: call.into(), -/// fallback_max_weight: None, /// }]); /// let message_hash = message.using_encoded(sp_io::hashing::blake2_256); /// @@ -460,10 +459,6 @@ pub trait SendXcm { /// Actually carry out the delivery operation for a previously validated message sending. fn deliver(ticket: Self::Ticket) -> result::Result; - - /// Ensure `[Self::delivery]` is successful for the given `location` when called in benchmarks. - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(_location: Option) {} } #[impl_trait_for_tuples::impl_for_tuples(30)] @@ -504,23 +499,16 @@ impl SendXcm for Tuple { )* ); Err(SendError::Unroutable) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - for_tuples!( #( - return Tuple::ensure_successful_delivery(location.clone()); - )* ); - } } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. 
diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index f75c984c068e..eaa115740f3e 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -5,42 +5,40 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } impl-trait-for-tuples = { workspace = true } -log = { workspace = true } -pallet-asset-conversion = { workspace = true } -pallet-transaction-payment = { workspace = true } +codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } sp-arithmetic = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -xcm = { workspace = true } -xcm-executor = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-asset-conversion = { workspace = true } +log = { workspace = true } # Polkadot dependencies polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -pallet-assets = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } pallet-balances = { workspace = true, default-features = true } -pallet-salary = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } +pallet-salary = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, 
default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } +assert_matches = { workspace = true } polkadot-test-runtime = { workspace = true } -primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } -sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -59,7 +57,6 @@ runtime-benchmarks = [ "polkadot-test-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index adba9a3ef79f..56a8493ef0ab 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -95,8 +95,7 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom })? .skip_inst_while(|inst| { matches!(inst, ClearOrigin | AliasOrigin(..)) || - matches!(inst, DescendOrigin(child) if child != &Here) || - matches!(inst, SetHints { .. }) + matches!(inst, DescendOrigin(child) if child != &Here) })? .match_next_inst(|inst| match inst { BuyExecution { weight_limit: Limited(ref mut weight), .. 
} diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 0093051290b7..978c6870cdaf 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -70,8 +70,8 @@ impl< Router: SendXcm, Querier: QueryHandler, Timeout: Get, - Beneficiary: Clone + core::fmt::Debug, - AssetKind: core::fmt::Debug, + Beneficiary: Clone, + AssetKind, AssetKindToLocatableAsset: TryConvert, BeneficiaryRefToLocation: for<'a> TryConvert<&'a Beneficiary, Location>, > Pay @@ -144,9 +144,10 @@ impl< } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::Beneficiary, asset_kind: Self::AssetKind, _: Self::Balance) { - let locatable = AssetKindToLocatableAsset::try_convert(asset_kind).unwrap(); - Router::ensure_successful_delivery(Some(locatable.location)); + fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) { + // We cannot generally guarantee this will go through successfully since we don't have any + // control over the XCM transport layers. We just assume that the benchmark environment + // will be sending it somewhere sensible. 
} #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 67c05c116e9d..8dafbf66adf0 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -58,7 +58,7 @@ impl< let message = Xcm::::try_from(versioned_message).map_err(|_| { log::trace!( target: LOG_TARGET, - "Failed to convert `VersionedXcm` into `xcm::prelude::Xcm`!", + "Failed to convert `VersionedXcm` into `XcmV3`.", ); ProcessMessageError::Unsupported diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 5b0d0a5f9835..fc2de89d2128 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -60,11 +60,6 @@ impl SendXcm for WithUniqueTopic { Inner::deliver(ticket)?; Ok(unique_id) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - Inner::ensure_successful_delivery(location); - } } impl InspectMessageQueues for WithUniqueTopic { fn clear_messages() { @@ -119,11 +114,6 @@ impl SendXcm for WithTopicSource) { - Inner::ensure_successful_delivery(location); - } } /// Trait for a type which ensures all requirements for successful delivery with XCM transport @@ -221,9 +211,4 @@ impl SendXcm for EnsureDecodableXcm { fn deliver(ticket: Self::Ticket) -> Result { Inner::deliver(ticket) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - Inner::ensure_successful_delivery(location); - } } diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index d8805274d3a5..cd2b6db66efc 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -333,26 +333,6 @@ fn allow_paid_should_deprivilege_origin() { assert_eq!(r, 
Err(ProcessMessageError::Overweight(Weight::from_parts(30, 30)))); } -#[test] -fn allow_paid_should_allow_hints() { - AllowPaidFrom::set(vec![Parent.into()]); - let fees = (Parent, 1).into(); - - let mut paying_message_with_hints = Xcm::<()>(vec![ - ReserveAssetDeposited((Parent, 100).into()), - SetHints { hints: vec![AssetClaimer { location: Location::here() }].try_into().unwrap() }, - BuyExecution { fees, weight_limit: Limited(Weight::from_parts(30, 30)) }, - DepositAsset { assets: AllCounted(1).into(), beneficiary: Here.into() }, - ]); - let r = AllowTopLevelPaidExecutionFrom::>::should_execute( - &Parent.into(), - paying_message_with_hints.inner_mut(), - Weight::from_parts(30, 30), - &mut props(Weight::zero()), - ); - assert_eq!(r, Ok(())); -} - #[test] fn suspension_should_work() { TestSuspender::set_suspended(true); diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index b4718edc6c98..062faee2abd9 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -22,7 +22,7 @@ use frame_support::{assert_ok, traits::tokens::Pay}; /// Type representing both a location and an asset that is held at that location. /// The id of the held asset is relative to the location where it is being held. 
-#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq)] pub struct AssetKind { destination: Location, asset_id: AssetId, diff --git a/polkadot/xcm/xcm-builder/src/tests/transacting.rs b/polkadot/xcm/xcm-builder/src/tests/transacting.rs index ba932beaeb3d..8963e7147fdc 100644 --- a/polkadot/xcm/xcm-builder/src/tests/transacting.rs +++ b/polkadot/xcm/xcm-builder/src/tests/transacting.rs @@ -23,7 +23,6 @@ fn transacting_should_work() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -44,7 +43,6 @@ fn transacting_should_respect_max_weight_requirement() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -67,7 +65,6 @@ fn transacting_should_refund_weight() { call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(30, 30))) .encode() .into(), - fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -99,7 +96,6 @@ fn paid_transacting_should_refund_payment_for_unused_weight() { call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(10, 10))) .encode() .into(), - fallback_max_weight: None, }, RefundSurplus, DepositAsset { assets: AllCounted(1).into(), beneficiary: one }, @@ -128,7 +124,6 @@ fn report_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -164,7 +159,6 @@ fn 
report_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -200,7 +194,6 @@ fn expect_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -219,7 +212,6 @@ fn expect_successful_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -246,7 +238,6 @@ fn expect_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -265,7 +256,6 @@ fn expect_failed_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -292,7 +282,6 @@ fn clear_transact_status_should_work() { Transact { origin_kind: OriginKind::Native, call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), - fallback_max_weight: None, }, ClearTransactStatus, ReportTransactStatus(QueryResponseInfo { diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 6b3c3adf737d..5c754f01ec0a 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -68,36 +68,25 @@ impl> SendXcm fn validate( dest: &mut Option, - msg: &mut Option>, + xcm: &mut Option>, ) -> 
SendResult { - // This `clone` ensures that `dest` is not consumed in any case. - let d = dest.clone().take().ok_or(MissingArgument)?; + let d = dest.take().ok_or(MissingArgument)?; let universal_source = UniversalLocation::get(); - let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?; - let (remote_network, remote_location) = devolved; - let xcm = msg.take().ok_or(MissingArgument)?; - - validate_export::( - remote_network, - 0, - universal_source, - remote_location, - xcm.clone(), - ) - .inspect_err(|err| { - if let NotApplicable = err { - // We need to make sure that msg is not consumed in case of `NotApplicable`. - *msg = Some(xcm); - } - }) + let devolved = match ensure_is_remote(universal_source.clone(), d) { + Ok(x) => x, + Err(d) => { + *dest = Some(d); + return Err(NotApplicable) + }, + }; + let (network, destination) = devolved; + let xcm = xcm.take().ok_or(SendError::MissingArgument)?; + validate_export::(network, 0, universal_source, destination, xcm) } fn deliver(ticket: Exporter::Ticket) -> Result { Exporter::deliver(ticket) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(_: Option) {} } pub trait ExporterFor { @@ -106,7 +95,7 @@ pub trait ExporterFor { /// /// The payment is specified from the local context, not the bridge chain. This is the /// total amount to withdraw in to Holding and should cover both payment for the execution on - /// the bridge chain and payment for the use of the `ExportMessage` instruction. + /// the bridge chain as well as payment for the use of the `ExportMessage` instruction. fn exporter_for( network: &NetworkId, remote_location: &InteriorLocation, @@ -216,8 +205,7 @@ impl, msg: &mut Option>, ) -> SendResult { - // This `clone` ensures that `dest` is not consumed in any case. 
- let d = dest.clone().take().ok_or(MissingArgument)?; + let d = dest.clone().ok_or(MissingArgument)?; let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -228,7 +216,7 @@ impl(bridge, message).inspect_err(|err| { - if let NotApplicable = err { - // We need to make sure that msg is not consumed in case of `NotApplicable`. - *msg = Some(xcm); - } - }) + validate_send::(bridge, message) } fn deliver(validation: Self::Ticket) -> Result { Router::deliver(validation) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - Router::ensure_successful_delivery(location); - } } /// Implementation of `SendXcm` which wraps the message inside an `ExportMessage` instruction @@ -298,9 +272,9 @@ impl, msg: &mut Option>, ) -> SendResult { - // This `clone` ensures that `dest` is not consumed in any case. - let d = dest.clone().take().ok_or(MissingArgument)?; - let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; + let d = dest.as_ref().ok_or(MissingArgument)?; + let devolved = + ensure_is_remote(UniversalLocation::get(), d.clone()).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -310,7 +284,7 @@ impl(bridge, message).inspect_err(|err| { - if let NotApplicable = err { - // We need to make sure that msg is not consumed in case of `NotApplicable`. 
- *msg = Some(xcm); - } - })?; + let (v, mut cost) = validate_send::(bridge, message)?; if let Some(bridge_payment) = maybe_payment { cost.push(bridge_payment); } @@ -369,11 +335,6 @@ impl Result { Router::deliver(ticket) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(location: Option) { - Router::ensure_successful_delivery(location); - } } impl InspectMessageQueues @@ -515,10 +476,10 @@ impl< let Location { parents, interior: mut junctions } = BridgedNetwork::get(); match junctions.take_first() { Some(GlobalConsensus(network)) => (network, parents), - _ => return Err(NotApplicable), + _ => return Err(SendError::NotApplicable), } }; - ensure!(&network == &bridged_network, NotApplicable); + ensure!(&network == &bridged_network, SendError::NotApplicable); // We don't/can't use the `channel` for this adapter. let dest = destination.take().ok_or(SendError::MissingArgument)?; @@ -535,7 +496,7 @@ impl< }, Err((dest, _)) => { *destination = Some(dest); - return Err(NotApplicable) + return Err(SendError::NotApplicable) }, }; @@ -579,10 +540,6 @@ impl< #[cfg(test)] mod tests { use super::*; - use frame_support::{ - assert_err, assert_ok, - traits::{Contains, Equals}, - }; #[test] fn ensure_is_remote_works() { @@ -607,50 +564,20 @@ mod tests { assert_eq!(x, Err((Parent, Polkadot, Parachain(1000)).into())); } - pub struct OkFor(PhantomData); - impl> SendXcm for OkFor { + pub struct OkSender; + impl SendXcm for OkSender { type Ticket = (); fn validate( - destination: &mut Option, + _destination: &mut Option, _message: &mut Option>, ) -> SendResult { - if let Some(d) = destination.as_ref() { - if Filter::contains(&d) { - return Ok(((), Assets::new())) - } - } - Err(NotApplicable) + Ok(((), Assets::new())) } fn deliver(_ticket: Self::Ticket) -> Result { Ok([0; 32]) } - - #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful_delivery(_: Option) {} - } - impl> ExportXcm for OkFor { - type Ticket = (); - - fn validate( - network: NetworkId, - 
_: u32, - _: &mut Option, - destination: &mut Option, - _: &mut Option>, - ) -> SendResult { - if let Some(d) = destination.as_ref() { - if Filter::contains(&(network, d.clone())) { - return Ok(((), Assets::new())) - } - } - Err(NotApplicable) - } - - fn deliver(_ticket: Self::Ticket) -> Result { - Ok([1; 32]) - } } /// Generic test case asserting that dest and msg is not consumed by `validate` implementation @@ -671,168 +598,46 @@ mod tests { } #[test] - fn local_exporters_works() { + fn remote_exporters_does_not_consume_dest_or_msg_on_not_applicable() { frame_support::parameter_types! { pub Local: NetworkId = ByGenesis([0; 32]); pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); pub DifferentRemote: NetworkId = ByGenesis([22; 32]); - pub RemoteDestination: Junction = Parachain(9657); - pub RoutableBridgeFilter: (NetworkId, InteriorLocation) = (DifferentRemote::get(), RemoteDestination::get().into()); + // no routers + pub BridgeTable: Vec = vec![]; } - type RoutableBridgeExporter = OkFor>; - type NotApplicableBridgeExporter = OkFor<()>; - assert_ok!(validate_export::( - DifferentRemote::get(), - 0, - UniversalLocation::get(), - RemoteDestination::get().into(), - Xcm::default() - )); - assert_err!( - validate_export::( - DifferentRemote::get(), - 0, - UniversalLocation::get(), - RemoteDestination::get().into(), - Xcm::default() - ), - NotApplicable - ); - // 1. check with local destination (should be remote) + // check with local destination (should be remote) let local_dest: Location = (Parent, Parachain(5678)).into(); assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); - // UnpaidLocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidLocalExporter, + UnpaidRemoteExporter, OkSender, UniversalLocation>, >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - // 2. 
check with not applicable from the inner router (using `NotApplicableBridgeSender`) - let remote_dest: Location = - (Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into(); - assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); - - // UnpaidLocalExporter - ensure_validate_does_not_consume_dest_or_msg::< - UnpaidLocalExporter, - >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - - // 3. Ok - deliver - // UnpaidRemoteExporter - assert_ok!(send_xcm::>( - remote_dest, - Xcm::default() - )); - } - - #[test] - fn remote_exporters_works() { - frame_support::parameter_types! { - pub Local: NetworkId = ByGenesis([0; 32]); - pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); - pub DifferentRemote: NetworkId = ByGenesis([22; 32]); - pub RoutableBridge: Location = Location::new(1, Parachain(9657)); - // not routable - pub NotApplicableBridgeTable: Vec = vec![]; - // routable - pub RoutableBridgeTable: Vec = vec![ - NetworkExportTableItem::new( - DifferentRemote::get(), - None, - RoutableBridge::get(), - None - ) - ]; - } - type RoutableBridgeSender = OkFor>; - type NotApplicableBridgeSender = OkFor<()>; - assert_ok!(validate_send::(RoutableBridge::get(), Xcm::default())); - assert_err!( - validate_send::(RoutableBridge::get(), Xcm::default()), - NotApplicable - ); - - // 1. 
check with local destination (should be remote) - let local_dest: Location = (Parent, Parachain(5678)).into(); - assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); - - // UnpaidRemoteExporter - ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, - UniversalLocation, - >, - >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, + NetworkExportTable, + OkSender, UniversalLocation, >, >(local_dest, |result| assert_eq!(Err(NotApplicable), result)); - // 2. check with not applicable destination (`NotApplicableBridgeTable`) + // check with not applicable destination let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); - // UnpaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, - UniversalLocation, - >, + UnpaidRemoteExporter, OkSender, UniversalLocation>, >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - // SovereignPaidRemoteExporter - ensure_validate_does_not_consume_dest_or_msg::< - SovereignPaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, - UniversalLocation, - >, - >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); - - // 3. 
check with not applicable from the inner router (using `NotApplicableBridgeSender`) - let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); - assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); - // UnpaidRemoteExporter - ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter< - NetworkExportTable, - NotApplicableBridgeSender, - UniversalLocation, - >, - >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - NotApplicableBridgeSender, - UniversalLocation, - >, - >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - - // 4. Ok - deliver - // UnpaidRemoteExporter - assert_ok!(send_xcm::< - UnpaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, + NetworkExportTable, + OkSender, UniversalLocation, >, - >(remote_dest.clone(), Xcm::default())); - // SovereignPaidRemoteExporter - assert_ok!(send_xcm::< - SovereignPaidRemoteExporter< - NetworkExportTable, - RoutableBridgeSender, - UniversalLocation, - >, - >(remote_dest, Xcm::default())); + >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); } #[test] diff --git a/polkadot/xcm/xcm-builder/src/weight.rs b/polkadot/xcm/xcm-builder/src/weight.rs index 6521121f2c94..f8c0275d0f54 100644 --- a/polkadot/xcm/xcm-builder/src/weight.rs +++ b/polkadot/xcm/xcm-builder/src/weight.rs @@ -65,8 +65,7 @@ impl, C: Decode + GetDispatchInfo, M> FixedWeightBounds ) -> Result { let instr_weight = match instruction { Transact { ref mut call, .. } => call.ensure_decoded()?.get_dispatch_info().call_weight, - SetErrorHandler(xcm) | SetAppendix(xcm) | ExecuteWithOrigin { xcm, .. 
} => - Self::weight_with_limit(xcm, instrs_limit)?, + SetErrorHandler(xcm) | SetAppendix(xcm) => Self::weight_with_limit(xcm, instrs_limit)?, _ => Weight::zero(), }; T::get().checked_add(&instr_weight).ok_or(()) diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 381dca54a5fb..cc966f91fe4d 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -5,26 +5,24 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } -environmental = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } impl-trait-for-tuples = { workspace = true } +environmental = { workspace = true } +codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } +xcm = { workspace = true } +sp-io = { workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } +frame-support = { workspace = true } tracing = { workspace = true } -xcm = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [features] default = ["std"] @@ -32,7 +30,6 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 6c2e56669bc3..7e6bfe967b90 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -13,24 +13,21 @@ workspace = true [dependencies] codec = { workspace = 
true, default-features = true } frame-support = { workspace = true } -frame-system = { workspace = true, default-features = true } futures = { workspace = true } -pallet-sudo = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-test-client = { workspace = true } polkadot-test-runtime = { workspace = true } polkadot-test-service = { workspace = true } sp-consensus = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] -std = ["frame-support/std", "frame-system/std", "pallet-sudo/std", "polkadot-runtime-parachains/std", "sp-runtime/std", "xcm/std"] +std = ["frame-support/std", "sp-runtime/std", "xcm/std"] diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index dfcc3fc4187f..9b918fd7eeed 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -79,11 +79,7 @@ fn transact_recursion_limit_works() { Xcm(vec![ WithdrawAsset((Here, 1_000).into()), BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, - Transact { - origin_kind: OriginKind::Native, - call: call.encode().into(), - fallback_max_weight: None, - }, + Transact { origin_kind: OriginKind::Native, call: call.encode().into() }, ]) }; let mut call: Option 
= None; @@ -375,26 +371,6 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { let mut block_builder = client.init_polkadot_block_builder(); - // Make the para available, so that `DMP` doesn't reject the XCM because the para is unknown. - let make_para_available = - construct_extrinsic( - &client, - polkadot_test_runtime::RuntimeCall::Sudo(pallet_sudo::Call::sudo { - call: Box::new(polkadot_test_runtime::RuntimeCall::System( - frame_system::Call::set_storage { - items: vec![( - polkadot_runtime_parachains::paras::Heads::< - polkadot_test_runtime::Runtime, - >::hashed_key_for(2000u32), - vec![1, 2, 3], - )], - }, - )), - }), - sp_keyring::Sr25519Keyring::Alice, - 0, - ); - // Simulate execution of an incoming XCM message at the reserve chain let execute = construct_extrinsic( &client, @@ -403,12 +379,9 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), }), sp_keyring::Sr25519Keyring::Alice, - 1, + 0, ); - block_builder - .push_polkadot_extrinsic(make_para_available) - .expect("pushes extrinsic"); block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); let block = block_builder.build().expect("Finalizes the block").block; diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index d0f18aea1ab3..a823dc6fec78 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -304,17 +304,7 @@ impl XcmAssetTransfers for XcmExecutor { type AssetTransactor = Config::AssetTransactor; } -impl FeeManager for XcmExecutor { - fn is_waived(origin: Option<&Location>, r: FeeReason) -> bool { - Config::FeeManager::is_waived(origin, r) - } - - fn handle_fee(fee: Assets, context: Option<&XcmContext>, r: FeeReason) { - Config::FeeManager::handle_fee(fee, context, r) - } -} - -#[derive(Debug, PartialEq)] +#[derive(Debug)] pub struct ExecutorError { pub index: u32, pub xcm_error: XcmError, @@ -939,8 +929,7 @@ impl XcmExecutor { Ok(()) 
}) }, - // `fallback_max_weight` is not used in the executor, it's only for conversions. - Transact { origin_kind, mut call, .. } => { + Transact { origin_kind, mut call } => { // We assume that the Relay-chain is allowed to use transact on this parachain. let origin = self.cloned_origin().ok_or_else(|| { tracing::trace!( @@ -1042,25 +1031,19 @@ impl XcmExecutor { ); Ok(()) }, - DescendOrigin(who) => self.do_descend_origin(who), - ClearOrigin => self.do_clear_origin(), - ExecuteWithOrigin { descendant_origin, xcm } => { - let previous_origin = self.context.origin.clone(); - - // Set new temporary origin. - if let Some(who) = descendant_origin { - self.do_descend_origin(who)?; - } else { - self.do_clear_origin()?; - } - // Process instructions. - let result = self.process(xcm).map_err(|error| { - tracing::error!(target: "xcm::execute", ?error, actual_origin = ?self.context.origin, original_origin = ?previous_origin, "ExecuteWithOrigin inner xcm failure"); - error.xcm_error - }); - // Reset origin to previous one. - self.context.origin = previous_origin; - result + DescendOrigin(who) => self + .context + .origin + .as_mut() + .ok_or(XcmError::BadOrigin)? + .append_with(who) + .map_err(|e| { + tracing::error!(target: "xcm::process_instruction::descend_origin", ?e, "Failed to append junctions"); + XcmError::LocationFull + }), + ClearOrigin => { + self.context.origin = None; + Ok(()) }, ReportError(response_info) => { // Report the given result by sending a QueryResponse XCM to a previously given @@ -1087,19 +1070,18 @@ impl XcmExecutor { DepositReserveAsset { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let mut assets = self.holding.saturating_take(assets); - // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from - // transferred assets. 
- let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { - // Deduct and return the part of `assets` that shall be used for delivery fees. - self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::DepositReserveAsset, &xcm)? + let maybe_delivery_fee_from_holding = if self.fees.is_empty() { + self.get_delivery_fee_from_holding(&assets, &dest, &xcm)? } else { None }; + let mut message = Vec::with_capacity(xcm.len() + 2); - tracing::trace!(target: "xcm::DepositReserveAsset", ?assets, "Assets except delivery fee"); + // now take assets to deposit (after having taken delivery fees) + let deposited = self.holding.saturating_take(assets); + tracing::trace!(target: "xcm::DepositReserveAsset", ?deposited, "Assets except delivery fee"); Self::do_reserve_deposit_assets( - assets, + deposited, &dest, &mut message, Some(&self.context), @@ -1108,7 +1090,7 @@ impl XcmExecutor { message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); - if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + if let Some(delivery_fee) = maybe_delivery_fee_from_holding { // Put back delivery_fee in holding register to be charged by XcmSender. self.holding.subsume_assets(delivery_fee); } @@ -1123,15 +1105,7 @@ impl XcmExecutor { InitiateReserveWithdraw { assets, reserve, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let mut assets = self.holding.saturating_take(assets); - // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from - // transferred assets. - let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { - // Deduct and return the part of `assets` that shall be used for delivery fees. - self.take_delivery_fee_from_assets(&mut assets, &reserve, FeeReason::InitiateReserveWithdraw, &xcm)? 
- } else { - None - }; + let assets = self.holding.saturating_take(assets); let mut message = Vec::with_capacity(xcm.len() + 2); Self::do_reserve_withdraw_assets( assets, @@ -1143,10 +1117,6 @@ impl XcmExecutor { message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); - if let Some(delivery_fee) = maybe_delivery_fee_from_assets { - // Put back delivery_fee in holding register to be charged by XcmSender. - self.holding.subsume_assets(delivery_fee); - } self.send(reserve, Xcm(message), FeeReason::InitiateReserveWithdraw)?; Ok(()) }); @@ -1158,25 +1128,13 @@ impl XcmExecutor { InitiateTeleport { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - let mut assets = self.holding.saturating_take(assets); - // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from - // transferred assets. - let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { - // Deduct and return the part of `assets` that shall be used for delivery fees. - self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::InitiateTeleport, &xcm)? - } else { - None - }; + let assets = self.holding.saturating_take(assets); let mut message = Vec::with_capacity(xcm.len() + 2); Self::do_teleport_assets(assets, &dest, &mut message, &self.context)?; // clear origin for subsequent custom instructions message.push(ClearOrigin); // append custom instructions message.extend(xcm.0.into_iter()); - if let Some(delivery_fee) = maybe_delivery_fee_from_assets { - // Put back delivery_fee in holding register to be charged by XcmSender. 
- self.holding.subsume_assets(delivery_fee); - } self.send(dest.clone(), Xcm(message), FeeReason::InitiateTeleport)?; Ok(()) }); @@ -1396,14 +1354,8 @@ impl XcmExecutor { self.error = None; Ok(()) }, - SetHints { hints } => { - for hint in hints.into_iter() { - match hint { - AssetClaimer { location } => { - self.asset_claimer = Some(location) - }, - } - } + SetAssetClaimer { location } => { + self.asset_claimer = Some(location); Ok(()) }, ClaimAsset { assets, ticket } => { @@ -1681,23 +1633,6 @@ impl XcmExecutor { } } - fn do_descend_origin(&mut self, who: InteriorLocation) -> XcmResult { - self.context - .origin - .as_mut() - .ok_or(XcmError::BadOrigin)? - .append_with(who) - .map_err(|e| { - tracing::error!(target: "xcm::do_descend_origin", ?e, "Failed to append junctions"); - XcmError::LocationFull - }) - } - - fn do_clear_origin(&mut self) -> XcmResult { - self.context.origin = None; - Ok(()) - } - /// Deposit `to_deposit` assets to `beneficiary`, without giving up on the first (transient) /// error, and retrying once just in case one of the subsequently deposited assets satisfy some /// requirement. @@ -1739,48 +1674,36 @@ impl XcmExecutor { Ok(()) } - /// Take from transferred `assets` the delivery fee required to send an onward transfer message - /// to `destination`. + /// Gets the necessary delivery fee to send a reserve transfer message to `destination` from + /// holding. /// /// Will be removed once the transition from `BuyExecution` to `PayFees` is complete. 
- fn take_delivery_fee_from_assets( - &self, - assets: &mut AssetsInHolding, + fn get_delivery_fee_from_holding( + &mut self, + assets: &AssetFilter, destination: &Location, - reason: FeeReason, xcm: &Xcm<()>, ) -> Result, XcmError> { - let to_weigh = assets.clone(); + // we need to do this take/put cycle to solve wildcards and get exact assets to + // be weighed + let to_weigh = self.holding.saturating_take(assets.clone()); + self.holding.subsume_assets(to_weigh.clone()); let to_weigh_reanchored = Self::reanchored(to_weigh, &destination, None); - let remote_instruction = match reason { - FeeReason::DepositReserveAsset => ReserveAssetDeposited(to_weigh_reanchored), - FeeReason::InitiateReserveWithdraw => WithdrawAsset(to_weigh_reanchored), - FeeReason::InitiateTeleport => ReceiveTeleportedAsset(to_weigh_reanchored), - _ => { - tracing::debug!( - target: "xcm::take_delivery_fee_from_assets", - "Unexpected delivery fee reason", - ); - return Err(XcmError::NotHoldingFees); - }, - }; - let mut message_to_weigh = Vec::with_capacity(xcm.len() + 2); - message_to_weigh.push(remote_instruction); - message_to_weigh.push(ClearOrigin); + let mut message_to_weigh = vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; message_to_weigh.extend(xcm.0.clone().into_iter()); let (_, fee) = validate_send::(destination.clone(), Xcm(message_to_weigh))?; let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { tracing::trace!( - target: "xcm::fees::take_delivery_fee_from_assets", + target: "xcm::fees::DepositReserveAsset", "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", self.asset_used_in_buy_execution, asset_needed_for_fees, ); let asset_to_pay_for_fees = self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); // set aside fee to be charged by XcmSender - let delivery_fee = assets.saturating_take(asset_to_pay_for_fees.into()); - tracing::trace!(target: "xcm::fees::take_delivery_fee_from_assets", ?delivery_fee); + let 
delivery_fee = self.holding.saturating_take(asset_to_pay_for_fees.into()); + tracing::trace!(target: "xcm::fees::DepositReserveAsset", ?delivery_fee); delivery_fee }); Ok(maybe_delivery_fee) diff --git a/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs b/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs deleted file mode 100644 index daba8ae1c036..000000000000 --- a/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Unit tests for the `ExecuteWithOrigin` instruction. -//! -//! See the [XCM RFC](https://github.com/polkadot-fellows/xcm-format/pull/38) -//! and the [specification](https://github.com/polkadot-fellows/xcm-format/tree/8cef08e375c6f6d3966909ccf773ed46ac703917) for more information. -//! -//! The XCM RFCs were moved to the fellowship RFCs but this one was approved and merged before that. - -use xcm::prelude::*; - -use super::mock::*; -use crate::ExecutorError; - -// The sender and recipient we use across these tests. -const SENDER_1: [u8; 32] = [0; 32]; -const SENDER_2: [u8; 32] = [1; 32]; -const RECIPIENT: [u8; 32] = [2; 32]; - -// ===== Happy path ===== - -// In this test, root descends into one account to pay fees, pops that origin -// and descends into a second account to withdraw funds. 
-// These assets can now be used to perform actions as root. -#[test] -fn root_can_descend_into_more_than_one_account() { - // Make sure the sender has enough funds to withdraw. - add_asset(SENDER_1, (Here, 10u128)); - add_asset(SENDER_2, (Here, 100u128)); - - // Build xcm. - let xcm = Xcm::::builder_unsafe() - .execute_with_origin( - Some(SENDER_1.into()), - Xcm::::builder_unsafe() - .withdraw_asset((Here, 10u128)) - .pay_fees((Here, 10u128)) - .build(), - ) - .execute_with_origin( - Some(SENDER_2.into()), - Xcm::::builder_unsafe().withdraw_asset((Here, 100u128)).build(), - ) - .expect_origin(Some(Here.into())) - .deposit_asset(All, RECIPIENT) - .build(); - - let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); - - // Program runs successfully. - assert!(vm.bench_process(xcm).is_ok()); - assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); - - // RECIPIENT gets the funds. - assert_eq!(asset_list(RECIPIENT), [(Here, 100u128).into()]); -} - -// ExecuteWithOrigin works for clearing the origin as well. -#[test] -fn works_for_clearing_origin() { - // Make sure the sender has enough funds to withdraw. - add_asset(SENDER_1, (Here, 100u128)); - - // Build xcm. - let xcm = Xcm::::builder_unsafe() - // Root code. - .expect_origin(Some(Here.into())) - .execute_with_origin( - None, - // User code, we run it with no origin. - Xcm::::builder_unsafe().expect_origin(None).build(), - ) - // We go back to root code. - .build(); - - let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); - - // Program runs successfully. - assert!(vm.bench_process(xcm).is_ok()); - assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); -} - -// Setting the error handler or appendix inside of `ExecuteWithOrigin` -// will work as expected. 
-#[test] -fn set_error_handler_and_appendix_work() { - add_asset(SENDER_1, (Here, 110u128)); - - let xcm = Xcm::::builder_unsafe() - .execute_with_origin( - Some(SENDER_1.into()), - Xcm::::builder_unsafe() - .withdraw_asset((Here, 110u128)) - .pay_fees((Here, 10u128)) - .set_error_handler( - Xcm::::builder_unsafe() - .deposit_asset(vec![(Here, 10u128).into()], SENDER_2) - .build(), - ) - .set_appendix( - Xcm::::builder_unsafe().deposit_asset(All, RECIPIENT).build(), - ) - .build(), - ) - .build(); - - let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); - - // Program runs successfully. - assert!(vm.bench_process(xcm).is_ok()); - - assert_eq!( - vm.error_handler(), - &Xcm::(vec![DepositAsset { - assets: vec![Asset { id: AssetId(Location::new(0, [])), fun: Fungible(10) }].into(), - beneficiary: Location::new(0, [AccountId32 { id: SENDER_2, network: None }]), - },]) - ); - assert_eq!( - vm.appendix(), - &Xcm::(vec![DepositAsset { - assets: All.into(), - beneficiary: Location::new(0, [AccountId32 { id: RECIPIENT, network: None }]), - },]) - ); - - assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); -} - -// ===== Unhappy path ===== - -// Processing still can't be called recursively more than the limit. -#[test] -fn recursion_exceeds_limit() { - // Make sure the sender has enough funds to withdraw. - add_asset(SENDER_1, (Here, 10u128)); - add_asset(SENDER_2, (Here, 100u128)); - - let mut xcm = Xcm::::builder_unsafe() - .execute_with_origin(None, Xcm::::builder_unsafe().clear_origin().build()) - .build(); - - // 10 is the RECURSION_LIMIT. - for _ in 0..10 { - let clone_of_xcm = xcm.clone(); - if let ExecuteWithOrigin { xcm: ref mut inner, .. } = xcm.inner_mut()[0] { - *inner = clone_of_xcm; - } - } - - let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); - - // Program errors with `ExceedsStackLimit`. 
- assert_eq!( - vm.bench_process(xcm), - Err(ExecutorError { - index: 0, - xcm_error: XcmError::ExceedsStackLimit, - weight: Weight::zero(), - }) - ); - assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); -} diff --git a/polkadot/xcm/xcm-executor/src/tests/mod.rs b/polkadot/xcm/xcm-executor/src/tests/mod.rs index 15a0565e357c..5c133871f0bf 100644 --- a/polkadot/xcm/xcm-executor/src/tests/mod.rs +++ b/polkadot/xcm/xcm-executor/src/tests/mod.rs @@ -20,7 +20,6 @@ //! `xcm-emulator` based tests in the cumulus folder. //! These tests deal with internal state changes of the XCVM. -mod execute_with_origin; mod initiate_transfer; mod mock; mod pay_fees; diff --git a/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs index cc97e2b3a16e..bc504b8db2a2 100644 --- a/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs +++ b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs @@ -38,7 +38,7 @@ fn set_asset_claimer() { // if withdrawing fails we're not missing any corner case. .withdraw_asset((Here, 100u128)) .clear_origin() - .set_hints(vec![AssetClaimer { location: bob.clone() }]) + .set_asset_claimer(bob.clone()) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. .build(); @@ -93,7 +93,7 @@ fn trap_then_set_asset_claimer() { .withdraw_asset((Here, 100u128)) .clear_origin() .trap(0u64) - .set_hints(vec![AssetClaimer { location: bob }]) + .set_asset_claimer(bob) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. .build(); @@ -121,7 +121,7 @@ fn set_asset_claimer_then_trap() { // if withdrawing fails we're not missing any corner case. .withdraw_asset((Here, 100u128)) .clear_origin() - .set_hints(vec![AssetClaimer { location: bob.clone() }]) + .set_asset_claimer(bob.clone()) .trap(0u64) .pay_fees((Here, 10u128)) // 10% destined for fees, not more. 
.build(); diff --git a/polkadot/xcm/xcm-executor/src/traits/export.rs b/polkadot/xcm/xcm-executor/src/traits/export.rs index 3e9275edab37..b356e0da7df7 100644 --- a/polkadot/xcm/xcm-executor/src/traits/export.rs +++ b/polkadot/xcm/xcm-executor/src/traits/export.rs @@ -108,7 +108,7 @@ impl ExportXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -both in `Some` before passing them as mutable references into `T::send_xcm`. +both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_export( network: NetworkId, channel: u32, @@ -120,7 +120,7 @@ /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -both in `Some` before passing them as mutable references into `T::send_xcm`. +both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent.
diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml index 96afb10e5397..9ccca76c321c 100644 --- a/polkadot/xcm/xcm-runtime-apis/Cargo.toml +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -21,17 +21,17 @@ xcm = { workspace = true } xcm-executor = { workspace = true } [dev-dependencies] -frame-executive = { workspace = true } frame-system = { workspace = true } -hex-literal = { workspace = true } -log = { workspace = true } -pallet-assets = { workspace = true } -pallet-balances = { workspace = true } -pallet-xcm = { workspace = true } sp-io = { workspace = true } -sp-tracing = { workspace = true, default-features = true } xcm-builder = { workspace = true } +hex-literal = { workspace = true } +pallet-xcm = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } xcm-executor = { workspace = true } +frame-executive = { workspace = true } +log = { workspace = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] @@ -60,5 +60,4 @@ runtime-benchmarks = [ "pallet-xcm/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index c3046b134d1f..2d14b4e571c6 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -353,26 +353,3 @@ fn dry_run_xcm() { ); }); } - -#[test] -fn calling_payment_api_with_a_lower_version_works() { - let transfer_amount = 100u128; - let xcm_to_weigh = Xcm::::builder_unsafe() - .withdraw_asset((Here, transfer_amount)) - .buy_execution((Here, transfer_amount), Unlimited) - .deposit_asset(AllCounted(1), [1u8; 32]) - .build(); - let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); - let lower_version_xcm_to_weigh = 
versioned_xcm_to_weigh.into_version(XCM_VERSION - 1).unwrap(); - let client = TestClient; - let runtime_api = client.runtime_api(); - let xcm_weight = - runtime_api.query_xcm_weight(H256::zero(), lower_version_xcm_to_weigh).unwrap(); - assert!(xcm_weight.is_ok()); - let native_token = VersionedAssetId::from(AssetId(Here.into())); - let lower_version_native_token = native_token.into_version(XCM_VERSION - 1).unwrap(); - let execution_fees = runtime_api - .query_weight_to_asset_fee(H256::zero(), xcm_weight.unwrap(), lower_version_native_token) - .unwrap(); - assert!(execution_fees.is_ok()); -} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index fb5d1ae7c0e5..f0a5be908f69 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -453,8 +453,7 @@ sp_api::mock_impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let latest_asset_id: Result = asset.clone().try_into(); - match latest_asset_id { + match asset.try_as::() { Ok(asset_id) if asset_id.0 == HereLocation::get() => { Ok(WeightToFee::weight_to_fee(&weight)) }, diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index 10c6f14bc8b9..c7caa49393ed 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -5,27 +5,25 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] codec = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } scale-info = { workspace = true } +paste = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } 
-sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index ccf0ecc39c4c..e0aff9b7782a 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -5,36 +5,34 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" -homepage.workspace = true -repository.workspace = true [lints] workspace = true [dependencies] codec = { workspace = true, default-features = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true, default-features = true } +log = { workspace = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } pallet-uniques = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } sp-core = { 
workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -xcm-simulator = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = [] @@ -50,5 +48,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index f971812f4f4d..bbac44ed8a1f 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -47,7 +47,6 @@ fn dmp() { Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), - fallback_max_weight: None, }]), )); }); @@ -75,7 +74,6 @@ fn ump() { Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), - fallback_max_weight: None, }]), )); }); @@ -103,7 +101,6 @@ fn xcmp() { 
Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, call: remark.encode().into(), - fallback_max_weight: None, }]), )); }); @@ -391,7 +388,6 @@ fn reserve_asset_class_create_and_reserve_transfer() { ) .encode() .into(), - fallback_max_weight: None, }]); // Send creation. assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 62a047975c87..04f8ba115173 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,30 +11,30 @@ publish = false workspace = true [dependencies] -arbitrary = { workspace = true } codec = { workspace = true, default-features = true } honggfuzz = { workspace = true } +arbitrary = { workspace = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -frame-executive = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-executive = { workspace = true, default-features = true } frame-try-runtime = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } 
pallet-xcm = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -xcm-simulator = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] try-runtime = [ @@ -59,7 +59,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] [[bin]] diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml index 120857c9a42e..4eac7af49f8a 100644 --- a/polkadot/zombienet-sdk-tests/Cargo.toml +++ b/polkadot/zombienet-sdk-tests/Cargo.toml @@ -8,16 +8,16 @@ license.workspace = true publish = false [dependencies] -anyhow = { workspace = true } -codec = { workspace = true, features = ["derive"] } env_logger = { workspace = true } log = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } subxt = { workspace = true, features = ["substrate-compat"] } subxt-signer = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread"] } +anyhow = { workspace = true } zombienet-sdk = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +codec = { workspace = true, features = ["derive"] } [features] zombie-metadata = [] diff --git a/polkadot/zombienet-sdk-tests/build.rs b/polkadot/zombienet-sdk-tests/build.rs index f7a62a53a8ac..240d86386af2 100644 --- a/polkadot/zombienet-sdk-tests/build.rs +++ b/polkadot/zombienet-sdk-tests/build.rs @@ -25,47 +25,39 @@ fn make_env_key(k: &str) -> String { 
replace_dashes(&k.to_ascii_uppercase()) } -fn wasm_sub_path(chain: &str) -> String { - let (package, runtime_name) = - if let Some(cumulus_test_runtime) = chain.strip_prefix("cumulus-test-runtime-") { - ( - "cumulus-test-runtime".to_string(), - format!("wasm_binary_{}.rs", replace_dashes(cumulus_test_runtime)), - ) - } else { - (format!("{chain}-runtime"), replace_dashes(&format!("{chain}-runtime"))) - }; - - format!("{}/{}.wasm", package, runtime_name) -} - fn find_wasm(chain: &str) -> Option { const PROFILES: [&str; 2] = ["release", "testnet"]; let manifest_path = env::var("CARGO_WORKSPACE_ROOT_DIR").unwrap(); let manifest_path = manifest_path.strip_suffix('/').unwrap(); debug_output!("manifest_path is : {}", manifest_path); - - let sub_path = wasm_sub_path(chain); - + let package = format!("{chain}-runtime"); let profile = PROFILES.into_iter().find(|p| { - let full_path = format!("{}/target/{}/wbuild/{}", manifest_path, p, sub_path); + let full_path = format!( + "{}/target/{}/wbuild/{}/{}.wasm", + manifest_path, + p, + &package, + replace_dashes(&package) + ); debug_output!("checking wasm at : {}", full_path); matches!(path::PathBuf::from(&full_path).try_exists(), Ok(true)) }); debug_output!("profile is : {:?}", profile); profile.map(|profile| { - PathBuf::from(&format!("{}/target/{}/wbuild/{}", manifest_path, profile, sub_path)) + PathBuf::from(&format!( + "{}/target/{}/wbuild/{}/{}.wasm", + manifest_path, + profile, + &package, + replace_dashes(&package) + )) }) } // based on https://gist.github.com/s0me0ne-unkn0wn/bbd83fe32ce10327086adbf13e750eec fn build_wasm(chain: &str) -> PathBuf { - let package = if chain.starts_with("cumulus-test-runtime-") { - String::from("cumulus-test-runtime") - } else { - format!("{chain}-runtime") - }; + let package = format!("{chain}-runtime"); let cargo = env::var("CARGO").unwrap(); let target = env::var("TARGET").unwrap(); @@ -89,7 +81,11 @@ fn build_wasm(chain: &str) -> PathBuf { .status() .unwrap(); - let wasm_path = 
&format!("{target_dir}/{target}/release/wbuild/{}", wasm_sub_path(chain)); + let wasm_path = &format!( + "{target_dir}/{target}/release/wbuild/{}/{}.wasm", + &package, + replace_dashes(&package) + ); PathBuf::from(wasm_path) } @@ -132,10 +128,6 @@ fn main() { const METADATA_DIR: &str = "metadata-files"; const CHAINS: [&str; 2] = ["rococo", "coretime-rococo"]; - // Add some cumulus test runtimes if needed. Formatted like - // "cumulus-test-runtime-elastic-scaling". - const CUMULUS_TEST_RUNTIMES: [&str; 0] = []; - let metadata_path = format!("{manifest_path}/{METADATA_DIR}"); for chain in CHAINS { @@ -153,21 +145,6 @@ fn main() { }; } - for chain in CUMULUS_TEST_RUNTIMES { - let full_path = format!("{metadata_path}/{chain}-local.scale"); - let output_path = path::PathBuf::from(&full_path); - - match output_path.try_exists() { - Ok(true) => { - debug_output!("got: {}", full_path); - }, - _ => { - debug_output!("needs: {}", full_path); - fetch_metadata_file(chain, &output_path); - }, - }; - } - substrate_build_script_utils::generate_cargo_keys(); substrate_build_script_utils::rerun_if_git_head_changed(); println!("cargo:rerun-if-changed={}", metadata_path); diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs deleted file mode 100644 index 7d4ad4a1dd8b..000000000000 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -use super::rococo; -use std::{collections::HashMap, ops::Range}; -use subxt::{OnlineClient, PolkadotConfig}; - -// Helper function for asserting the throughput of parachains (total number of backed candidates in -// a window of relay chain blocks), after the first session change. 
-pub async fn assert_para_throughput( - relay_client: &OnlineClient, - stop_at: u32, - expected_candidate_ranges: HashMap>, -) -> Result<(), anyhow::Error> { - let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; - let mut candidate_count: HashMap = HashMap::new(); - let mut current_block_count = 0; - let mut had_first_session_change = false; - - while let Some(block) = blocks_sub.next().await { - let block = block?; - log::debug!("Finalized relay chain block {}", block.number()); - let events = block.events().await?; - let is_session_change = events.has::()?; - - if !had_first_session_change && is_session_change { - had_first_session_change = true; - } - - if had_first_session_change && !is_session_change { - current_block_count += 1; - - for event in events.find::() { - *(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1; - } - } - - if current_block_count == stop_at { - break; - } - } - - log::info!( - "Reached {} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", - stop_at, - candidate_count - ); - - for (para_id, expected_candidate_range) in expected_candidate_ranges { - let actual = candidate_count - .get(¶_id) - .expect("ParaId did not have any backed candidates"); - assert!( - expected_candidate_range.contains(actual), - "Candidate count {actual} not within range {expected_candidate_range:?}" - ); - } - - Ok(()) -} diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs deleted file mode 100644 index bb296a419df1..000000000000 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] -pub mod rococo {} - -mod helpers; -mod slot_based_3cores; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs deleted file mode 100644 index 41ec1250ecc4..000000000000 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Test that parachains that use a single slot-based collator with elastic scaling MVP and with -// elastic scaling with RFC103 can achieve full throughput of 3 candidates per block. - -use anyhow::anyhow; - -use super::{ - helpers::assert_para_throughput, - rococo, - rococo::runtime_types::{ - pallet_broker::coretime_interface::CoreAssignment, - polkadot_runtime_parachains::assigner_coretime::PartsOf57600, - }, -}; -use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::NetworkConfigBuilder; - -#[tokio::test(flavor = "multi_thread")] -async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { - let _ = env_logger::try_init_from_env( - env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), - ); - - let images = zombienet_sdk::environment::get_images_from_env(); - - let config = NetworkConfigBuilder::new() - .with_relaychain(|r| { - let r = r - .with_chain("rococo-local") - .with_default_command("polkadot") - .with_default_image(images.polkadot.as_str()) - .with_default_args(vec![("-lparachain=debug").into()]) - .with_genesis_overrides(json!({ - "configuration": { - "config": { - "scheduler_params": { - // Num cores is 4, because 2 extra will be added automatically when registering the paras. 
- "num_cores": 4, - "max_validators_per_core": 2 - }, - "async_backing_params": { - "max_candidate_depth": 6, - "allowed_ancestry_len": 2 - } - } - } - })) - // Have to set a `with_node` outside of the loop below, so that `r` has the right - // type. - .with_node(|node| node.with_name("validator-0")); - - (1..12) - .fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) - }) - .with_parachain(|p| { - // Para 2100 uses the old elastic scaling mvp, which doesn't send the new UMP signal - // commitment for selecting the core index. - p.with_id(2100) - .with_default_command("test-parachain") - .with_default_image(images.cumulus.as_str()) - .with_chain("elastic-scaling-mvp") - .with_default_args(vec![("--experimental-use-slot-based").into()]) - .with_default_args(vec![ - ("--experimental-use-slot-based").into(), - ("-lparachain=debug,aura=debug").into(), - ]) - .with_collator(|n| n.with_name("collator-elastic-mvp")) - }) - .with_parachain(|p| { - // Para 2200 uses the new RFC103-enabled collator which sends the UMP signal commitment - // for selecting the core index - p.with_id(2200) - .with_default_command("test-parachain") - .with_default_image(images.cumulus.as_str()) - .with_chain("elastic-scaling") - .with_default_args(vec![ - ("--experimental-use-slot-based").into(), - ("-lparachain=debug,aura=debug").into(), - ]) - .with_collator(|n| n.with_name("collator-elastic")) - }) - .build() - .map_err(|e| { - let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); - anyhow!("config errs: {errs}") - })?; - - let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); - let network = spawn_fn(config).await?; - - let relay_node = network.get_node("validator-0")?; - - let relay_client: OnlineClient = relay_node.wait_client().await?; - let alice = dev::alice(); - - // Assign two extra cores to each parachain. 
- relay_client - .tx() - .sign_and_submit_then_watch_default( - &rococo::tx() - .sudo() - .sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility( - rococo::runtime_types::pallet_utility::pallet::Call::batch { - calls: vec![ - rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( - rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { - core: 0, - begin: 0, - assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))], - end_hint: None - } - ), - rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( - rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { - core: 1, - begin: 0, - assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))], - end_hint: None - } - ), - rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( - rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { - core: 2, - begin: 0, - assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))], - end_hint: None - } - ), - rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( - rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { - core: 3, - begin: 0, - assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))], - end_hint: None - } - ) - ], - }, - )), - &alice, - ) - .await? - .wait_for_finalized_success() - .await?; - - log::info!("2 more cores assigned to each parachain"); - - // Expect a backed candidate count of at least 39 for each parachain in 15 relay chain blocks - // (2.6 candidates per para per relay chain block). - // Note that only blocks after the first session change and blocks that don't contain a session - // change will be counted. 
- assert_para_throughput( - &relay_client, - 15, - [(2100, 39..46), (2200, 39..46)].into_iter().collect(), - ) - .await?; - - log::info!("Test finished successfully"); - - Ok(()) -} diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs index 977e0f90b1c9..74cdc0765600 100644 --- a/polkadot/zombienet-sdk-tests/tests/lib.rs +++ b/polkadot/zombienet-sdk-tests/tests/lib.rs @@ -1,7 +1,4 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -#[cfg(feature = "zombie-metadata")] -mod elastic_scaling; -#[cfg(feature = "zombie-metadata")] mod smoke; diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs index 2da2436a1111..7880dc782d05 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs @@ -180,7 +180,7 @@ where #[tokio::test(flavor = "multi_thread")] async fn coretime_revenue_test() -> Result<(), anyhow::Error> { - let _ = env_logger::try_init_from_env( + env_logger::init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); @@ -499,7 +499,7 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; - log::info!("Test finished successfully"); + log::info!("Test finished successfuly"); Ok(()) } diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs b/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs index 072a9d54ecda..a3fe15382674 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs @@ -1,4 +1,5 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 +#[cfg(feature = "zombie-metadata")] mod coretime_revenue; diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml index 046d707cc1e8..9b3576eaa3c2 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml @@ -37,4 +37,4 @@ onboard_as_parachain = false [parachains.collator] name = "collator2000" command = "polkadot-parachain" - args = [ "-lparachain=debug", "--experimental-use-slot-based" ] + args = [ "-lparachain=debug" ] diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 0cfc29f532d1..7ba896e1c903 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -12,7 +12,7 @@ validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. 
alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml index d3ff00002242..745c4f9e24b1 100644 --- a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml @@ -36,4 +36,4 @@ chain = "glutton-westend-local-2000" name = "collator-2000" image = "{{CUMULUS_IMAGE}}" command = "polkadot-parachain" - args = ["-lparachain=debug", "--experimental-use-slot-based"] + args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml deleted file mode 100644 index 43f3ef8f9e55..000000000000 --- a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml +++ /dev/null @@ -1,58 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] - max_candidate_depth = 3 - allowed_ancestry_len = 2 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - max_validators_per_core = 4 - num_cores = 1 - lookahead = 2 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] - needed_approvals = 3 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -command = "polkadot" - - [[relaychain.node_groups]] - name = "validator" - args = ["-lparachain=debug,parachain::collator-protocol=trace" ] - count = 4 - -[[parachains]] -id 
= 2000 -register_para = false -onboard_as_parachain = false -add_to_genesis = false -chain = "glutton-westend-local-2000" - [parachains.genesis.runtimeGenesis.patch.glutton] - compute = "50000000" - storage = "2500000000" - trashDataCount = 5120 - - [parachains.collator] - name = "collator-2000" - image = "{{CUMULUS_IMAGE}}" - command = "polkadot-parachain" - args = ["-lparachain=debug,parachain::collator-protocol=trace", "--experimental-use-slot-based"] - -[[parachains]] -id = 2001 -register_para = false -onboard_as_parachain = false -add_to_genesis = false -chain = "glutton-westend-local-2001" - [parachains.genesis.runtimeGenesis.patch.glutton] - compute = "50000000" - storage = "2500000000" - trashDataCount = 5120 - - [parachains.collator] - name = "collator-2001" - image = "{{CUMULUS_IMAGE}}" - command = "polkadot-parachain" - args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl deleted file mode 100644 index 8892b03ac29c..000000000000 --- a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl +++ /dev/null @@ -1,16 +0,0 @@ -Description: CT shared core fairness test -Network: ./0019-coretime-collation-fetching-fairness.toml -Creds: config - -validator: reports node_roles is 4 - -validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds -# core 0 is shared 3:1 between paras -validator-0: js-script ./assign-core.js with "0,2000,43200,2001,14400" return is 0 within 600 seconds - -collator-2000: reports block height is at least 9 within 200 seconds -collator-2001: reports block height is at least 3 within 10 seconds - -# hardcoded check to verify that included onchain events are indeed 3:1 -validator-0: js-script ./0019-verify-included-events.js return is 1 within 120 seconds - diff --git 
a/polkadot/zombienet_tests/functional/0019-verify-included-events.js b/polkadot/zombienet_tests/functional/0019-verify-included-events.js deleted file mode 100644 index 6557a5a80e6b..000000000000 --- a/polkadot/zombienet_tests/functional/0019-verify-included-events.js +++ /dev/null @@ -1,51 +0,0 @@ -function parse_pjs_int(input) { - return parseInt(input.replace(/,/g, '')); -} - -async function run(nodeName, networkInfo) { - const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - let blocks_per_para = {}; - - await new Promise(async (resolve, _) => { - let block_count = 0; - const unsubscribe = await api.query.system.events(async (events, block_hash) => { - block_count++; - - events.forEach((record) => { - const event = record.event; - - if (event.method != 'CandidateIncluded') { - return; - } - - let included_para_id = parse_pjs_int(event.toHuman().data[0].descriptor.paraId); - let relay_parent = event.toHuman().data[0].descriptor.relayParent; - if (blocks_per_para[included_para_id] == undefined) { - blocks_per_para[included_para_id] = 1; - } else { - blocks_per_para[included_para_id]++; - } - console.log(`CandidateIncluded for ${included_para_id}: block_offset=${block_count} relay_parent=${relay_parent}`); - }); - - if (block_count == 12) { - unsubscribe(); - return resolve(); - } - }); - }); - - console.log(`Result: 2000: ${blocks_per_para[2000]}, 2001: ${blocks_per_para[2001]}`); - // This check assumes that para 2000 runs slot based collator which respects its claim queue - // and para 2001 runs lookahead which generates blocks for each relay parent. - // - // For 12 blocks there will be one session change. One block won't have anything backed/included. - // In the next there will be one backed so for 12 blocks we should expect 10 included events - no - // more than 4 for para 2001 and at least 6 for para 2000. 
This should also cover the unlucky - // case when we observe two session changes during the 12 block period. - return (blocks_per_para[2000] >= 6) && (blocks_per_para[2001] <= 4); -} - -module.exports = { run }; diff --git a/prdoc/stable2412/pr_3151.prdoc b/prdoc/pr_3151.prdoc similarity index 100% rename from prdoc/stable2412/pr_3151.prdoc rename to prdoc/pr_3151.prdoc diff --git a/prdoc/stable2412/pr_3685.prdoc b/prdoc/pr_3685.prdoc similarity index 100% rename from prdoc/stable2412/pr_3685.prdoc rename to prdoc/pr_3685.prdoc diff --git a/prdoc/stable2412/pr_3881.prdoc b/prdoc/pr_3881.prdoc similarity index 100% rename from prdoc/stable2412/pr_3881.prdoc rename to prdoc/pr_3881.prdoc diff --git a/prdoc/stable2412/pr_3970.prdoc b/prdoc/pr_3970.prdoc similarity index 100% rename from prdoc/stable2412/pr_3970.prdoc rename to prdoc/pr_3970.prdoc diff --git a/prdoc/stable2412/pr_4012.prdoc b/prdoc/pr_4012.prdoc similarity index 100% rename from prdoc/stable2412/pr_4012.prdoc rename to prdoc/pr_4012.prdoc diff --git a/prdoc/stable2412/pr_4251.prdoc b/prdoc/pr_4251.prdoc similarity index 100% rename from prdoc/stable2412/pr_4251.prdoc rename to prdoc/pr_4251.prdoc diff --git a/prdoc/stable2412/pr_4257.prdoc b/prdoc/pr_4257.prdoc similarity index 100% rename from prdoc/stable2412/pr_4257.prdoc rename to prdoc/pr_4257.prdoc diff --git a/prdoc/pr_4273.prdoc b/prdoc/pr_4273.prdoc deleted file mode 100644 index 1ff0a5782a41..000000000000 --- a/prdoc/pr_4273.prdoc +++ /dev/null @@ -1,19 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: "[pallet-broker] add extrinsic to reserve a system core without having to wait two sale boundaries" - -doc: - - audience: Runtime User - description: | - When calling the reserve extrinsic after sales have started, the assignment will be reserved, - but two sale period boundaries must pass before the core is actually 
assigned. A new - `force_reserve` extrinsic is introduced to allow a core to be immediately assigned. - -crates: - - name: pallet-broker - bump: major - - name: coretime-rococo-runtime - bump: patch - - name: coretime-westend-runtime - bump: patch diff --git a/prdoc/stable2412/pr_4639.prdoc b/prdoc/pr_4639.prdoc similarity index 100% rename from prdoc/stable2412/pr_4639.prdoc rename to prdoc/pr_4639.prdoc diff --git a/prdoc/stable2412/pr_4826.prdoc b/prdoc/pr_4826.prdoc similarity index 100% rename from prdoc/stable2412/pr_4826.prdoc rename to prdoc/pr_4826.prdoc diff --git a/prdoc/stable2412/pr_4837.prdoc b/prdoc/pr_4837.prdoc similarity index 100% rename from prdoc/stable2412/pr_4837.prdoc rename to prdoc/pr_4837.prdoc diff --git a/prdoc/stable2412/pr_4846.prdoc b/prdoc/pr_4846.prdoc similarity index 100% rename from prdoc/stable2412/pr_4846.prdoc rename to prdoc/pr_4846.prdoc diff --git a/prdoc/stable2412/pr_4849.prdoc b/prdoc/pr_4849.prdoc similarity index 100% rename from prdoc/stable2412/pr_4849.prdoc rename to prdoc/pr_4849.prdoc diff --git a/prdoc/stable2412/pr_4851.prdoc b/prdoc/pr_4851.prdoc similarity index 100% rename from prdoc/stable2412/pr_4851.prdoc rename to prdoc/pr_4851.prdoc diff --git a/prdoc/pr_4880.prdoc b/prdoc/pr_4880.prdoc deleted file mode 100644 index 1bcd09088b5f..000000000000 --- a/prdoc/pr_4880.prdoc +++ /dev/null @@ -1,31 +0,0 @@ -title: Collation fetching fairness in collator protocol - -doc: - - audience: "Node Dev" - description: | - Implements collation fetching fairness in the validator side of the collator protocol. With - core time in place if two (or more) parachains share a single core no fairness was guaranteed - between them in terms of collation fetching. The current implementation was accepting up to - `max_candidate_depth + 1` seconded collations per relay parent and once this limit is reached - no new collations are accepted. 
A misbehaving collator can abuse this fact and prevent other - collators/parachains from advertising collations by advertising `max_candidate_depth + 1` - collations of its own. - To address this issue two changes are made: - 1. For each parachain id the validator accepts advertisements until the number of entries in - the claim queue equals the number of seconded candidates. - 2. When new collation should be fetched the validator inspects what was seconded so far, - what's in the claim queue and picks the first slot which hasn't got a collation seconded - and there is no candidate pending seconding for it. If there is an advertisement in the - waiting queue for it it is fetched. Otherwise the next free slot is picked. - These two changes guarantee that: - 1. Validator doesn't accept more collations than it can actually back. - 2. Each parachain has got a fair share of core time based on its allocations in the claim - queue. - -crates: - - name: polkadot-collator-protocol - bump: patch - - name: polkadot - bump: patch - - name: polkadot-node-subsystem-util - bump: minor \ No newline at end of file diff --git a/prdoc/stable2412/pr_4889.prdoc b/prdoc/pr_4889.prdoc similarity index 100% rename from prdoc/stable2412/pr_4889.prdoc rename to prdoc/pr_4889.prdoc diff --git a/prdoc/stable2412/pr_4974.prdoc b/prdoc/pr_4974.prdoc similarity index 100% rename from prdoc/stable2412/pr_4974.prdoc rename to prdoc/pr_4974.prdoc diff --git a/prdoc/stable2412/pr_4982.prdoc b/prdoc/pr_4982.prdoc similarity index 100% rename from prdoc/stable2412/pr_4982.prdoc rename to prdoc/pr_4982.prdoc diff --git a/prdoc/stable2412/pr_5038.prdoc b/prdoc/pr_5038.prdoc similarity index 100% rename from prdoc/stable2412/pr_5038.prdoc rename to prdoc/pr_5038.prdoc diff --git a/prdoc/stable2412/pr_5194.prdoc b/prdoc/pr_5194.prdoc similarity index 100% rename from prdoc/stable2412/pr_5194.prdoc rename to prdoc/pr_5194.prdoc diff --git a/prdoc/stable2412/pr_5198.prdoc b/prdoc/pr_5198.prdoc similarity 
index 100% rename from prdoc/stable2412/pr_5198.prdoc rename to prdoc/pr_5198.prdoc diff --git a/prdoc/stable2412/pr_5201.prdoc b/prdoc/pr_5201.prdoc similarity index 100% rename from prdoc/stable2412/pr_5201.prdoc rename to prdoc/pr_5201.prdoc diff --git a/prdoc/stable2412/pr_5274.prdoc b/prdoc/pr_5274.prdoc similarity index 100% rename from prdoc/stable2412/pr_5274.prdoc rename to prdoc/pr_5274.prdoc diff --git a/prdoc/stable2412/pr_5322.prdoc b/prdoc/pr_5322.prdoc similarity index 100% rename from prdoc/stable2412/pr_5322.prdoc rename to prdoc/pr_5322.prdoc diff --git a/prdoc/stable2412/pr_5343.prdoc b/prdoc/pr_5343.prdoc similarity index 100% rename from prdoc/stable2412/pr_5343.prdoc rename to prdoc/pr_5343.prdoc diff --git a/prdoc/pr_5363.prdoc b/prdoc/pr_5363.prdoc deleted file mode 100644 index c3ecfffb9e52..000000000000 --- a/prdoc/pr_5363.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: "[pallet-xcm] waive transport fees based on XcmConfig" - -doc: - - audience: Runtime Dev - description: | - pallet-xcm::send() no longer implicitly waives transport fees for the local root location, - but instead relies on xcm_executor::Config::FeeManager to determine whether certain locations have free transport. - - 🚨 Warning: 🚨 If your chain relies on free transport for local root, please make - sure to add Location::here() to the waived-fee locations in your configured xcm_executor::Config::FeeManager. 
- -crates: - - name: pallet-xcm - bump: major \ No newline at end of file diff --git a/prdoc/stable2412/pr_5372.prdoc b/prdoc/pr_5372.prdoc similarity index 100% rename from prdoc/stable2412/pr_5372.prdoc rename to prdoc/pr_5372.prdoc diff --git a/prdoc/stable2412/pr_5390.prdoc b/prdoc/pr_5390.prdoc similarity index 100% rename from prdoc/stable2412/pr_5390.prdoc rename to prdoc/pr_5390.prdoc diff --git a/prdoc/stable2412/pr_5420.prdoc b/prdoc/pr_5420.prdoc similarity index 100% rename from prdoc/stable2412/pr_5420.prdoc rename to prdoc/pr_5420.prdoc diff --git a/prdoc/stable2412/pr_5423.prdoc b/prdoc/pr_5423.prdoc similarity index 100% rename from prdoc/stable2412/pr_5423.prdoc rename to prdoc/pr_5423.prdoc diff --git a/prdoc/stable2412/pr_5435.prdoc b/prdoc/pr_5435.prdoc similarity index 100% rename from prdoc/stable2412/pr_5435.prdoc rename to prdoc/pr_5435.prdoc diff --git a/prdoc/stable2412/pr_5461.prdoc b/prdoc/pr_5461.prdoc similarity index 100% rename from prdoc/stable2412/pr_5461.prdoc rename to prdoc/pr_5461.prdoc diff --git a/prdoc/stable2412/pr_5469.prdoc b/prdoc/pr_5469.prdoc similarity index 100% rename from prdoc/stable2412/pr_5469.prdoc rename to prdoc/pr_5469.prdoc diff --git a/prdoc/stable2412/pr_5502.prdoc b/prdoc/pr_5502.prdoc similarity index 100% rename from prdoc/stable2412/pr_5502.prdoc rename to prdoc/pr_5502.prdoc diff --git a/prdoc/stable2412/pr_5515.prdoc b/prdoc/pr_5515.prdoc similarity index 100% rename from prdoc/stable2412/pr_5515.prdoc rename to prdoc/pr_5515.prdoc diff --git a/prdoc/stable2412/pr_5521.prdoc b/prdoc/pr_5521.prdoc similarity index 100% rename from prdoc/stable2412/pr_5521.prdoc rename to prdoc/pr_5521.prdoc diff --git a/prdoc/stable2412/pr_5526.prdoc b/prdoc/pr_5526.prdoc similarity index 100% rename from prdoc/stable2412/pr_5526.prdoc rename to prdoc/pr_5526.prdoc diff --git a/prdoc/stable2412/pr_5540.prdoc b/prdoc/pr_5540.prdoc similarity index 100% rename from prdoc/stable2412/pr_5540.prdoc rename to 
prdoc/pr_5540.prdoc diff --git a/prdoc/stable2412/pr_5548.prdoc b/prdoc/pr_5548.prdoc similarity index 100% rename from prdoc/stable2412/pr_5548.prdoc rename to prdoc/pr_5548.prdoc diff --git a/prdoc/stable2412/pr_5554.prdoc b/prdoc/pr_5554.prdoc similarity index 100% rename from prdoc/stable2412/pr_5554.prdoc rename to prdoc/pr_5554.prdoc diff --git a/prdoc/stable2412/pr_5555.prdoc b/prdoc/pr_5555.prdoc similarity index 100% rename from prdoc/stable2412/pr_5555.prdoc rename to prdoc/pr_5555.prdoc diff --git a/prdoc/stable2412/pr_5556.prdoc b/prdoc/pr_5556.prdoc similarity index 100% rename from prdoc/stable2412/pr_5556.prdoc rename to prdoc/pr_5556.prdoc diff --git a/prdoc/stable2412/pr_5572.prdoc b/prdoc/pr_5572.prdoc similarity index 100% rename from prdoc/stable2412/pr_5572.prdoc rename to prdoc/pr_5572.prdoc diff --git a/prdoc/stable2412/pr_5585.prdoc b/prdoc/pr_5585.prdoc similarity index 100% rename from prdoc/stable2412/pr_5585.prdoc rename to prdoc/pr_5585.prdoc diff --git a/prdoc/stable2412/pr_5592.prdoc b/prdoc/pr_5592.prdoc similarity index 100% rename from prdoc/stable2412/pr_5592.prdoc rename to prdoc/pr_5592.prdoc diff --git a/prdoc/stable2412/pr_5601.prdoc b/prdoc/pr_5601.prdoc similarity index 100% rename from prdoc/stable2412/pr_5601.prdoc rename to prdoc/pr_5601.prdoc diff --git a/prdoc/stable2412/pr_5606.prdoc b/prdoc/pr_5606.prdoc similarity index 100% rename from prdoc/stable2412/pr_5606.prdoc rename to prdoc/pr_5606.prdoc diff --git a/prdoc/stable2412/pr_5608.prdoc b/prdoc/pr_5608.prdoc similarity index 100% rename from prdoc/stable2412/pr_5608.prdoc rename to prdoc/pr_5608.prdoc diff --git a/prdoc/stable2412/pr_5609.prdoc b/prdoc/pr_5609.prdoc similarity index 100% rename from prdoc/stable2412/pr_5609.prdoc rename to prdoc/pr_5609.prdoc diff --git a/prdoc/stable2412/pr_5616.prdoc b/prdoc/pr_5616.prdoc similarity index 100% rename from prdoc/stable2412/pr_5616.prdoc rename to prdoc/pr_5616.prdoc diff --git a/prdoc/stable2412/pr_5623.prdoc 
b/prdoc/pr_5623.prdoc similarity index 100% rename from prdoc/stable2412/pr_5623.prdoc rename to prdoc/pr_5623.prdoc diff --git a/prdoc/stable2412/pr_5630.prdoc b/prdoc/pr_5630.prdoc similarity index 100% rename from prdoc/stable2412/pr_5630.prdoc rename to prdoc/pr_5630.prdoc diff --git a/prdoc/stable2412/pr_5635.prdoc b/prdoc/pr_5635.prdoc similarity index 100% rename from prdoc/stable2412/pr_5635.prdoc rename to prdoc/pr_5635.prdoc diff --git a/prdoc/stable2412/pr_5640.prdoc b/prdoc/pr_5640.prdoc similarity index 100% rename from prdoc/stable2412/pr_5640.prdoc rename to prdoc/pr_5640.prdoc diff --git a/prdoc/pr_5656.prdoc b/prdoc/pr_5656.prdoc deleted file mode 100644 index b20546bf7a5e..000000000000 --- a/prdoc/pr_5656.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Use Relay Blocknumber in Pallet Broker - -doc: - - audience: Runtime Dev - description: | - Changing `sale_start`, `interlude_length` and `leading_length` in `pallet_broker` to use relay chain block numbers instead of parachain block numbers. - Relay chain block numbers are almost deterministic and more future proof. 
- -crates: - - name: pallet-broker - bump: major - - name: coretime-rococo-runtime - bump: major - - name: coretime-westend-runtime - bump: major \ No newline at end of file diff --git a/prdoc/stable2412/pr_5664.prdoc b/prdoc/pr_5664.prdoc similarity index 100% rename from prdoc/stable2412/pr_5664.prdoc rename to prdoc/pr_5664.prdoc diff --git a/prdoc/stable2412/pr_5665.prdoc b/prdoc/pr_5665.prdoc similarity index 100% rename from prdoc/stable2412/pr_5665.prdoc rename to prdoc/pr_5665.prdoc diff --git a/prdoc/stable2412/pr_5666.prdoc b/prdoc/pr_5666.prdoc similarity index 100% rename from prdoc/stable2412/pr_5666.prdoc rename to prdoc/pr_5666.prdoc diff --git a/prdoc/stable2412/pr_5675.prdoc b/prdoc/pr_5675.prdoc similarity index 100% rename from prdoc/stable2412/pr_5675.prdoc rename to prdoc/pr_5675.prdoc diff --git a/prdoc/stable2412/pr_5676.prdoc b/prdoc/pr_5676.prdoc similarity index 100% rename from prdoc/stable2412/pr_5676.prdoc rename to prdoc/pr_5676.prdoc diff --git a/prdoc/stable2412/pr_5679.prdoc b/prdoc/pr_5679.prdoc similarity index 100% rename from prdoc/stable2412/pr_5679.prdoc rename to prdoc/pr_5679.prdoc diff --git a/prdoc/stable2412/pr_5682.prdoc b/prdoc/pr_5682.prdoc similarity index 100% rename from prdoc/stable2412/pr_5682.prdoc rename to prdoc/pr_5682.prdoc diff --git a/prdoc/stable2412/pr_5684.prdoc b/prdoc/pr_5684.prdoc similarity index 100% rename from prdoc/stable2412/pr_5684.prdoc rename to prdoc/pr_5684.prdoc diff --git a/prdoc/stable2412/pr_5686.prdoc b/prdoc/pr_5686.prdoc similarity index 100% rename from prdoc/stable2412/pr_5686.prdoc rename to prdoc/pr_5686.prdoc diff --git a/prdoc/stable2412/pr_5687.prdoc b/prdoc/pr_5687.prdoc similarity index 100% rename from prdoc/stable2412/pr_5687.prdoc rename to prdoc/pr_5687.prdoc diff --git a/prdoc/stable2412/pr_5693.prdoc b/prdoc/pr_5693.prdoc similarity index 100% rename from prdoc/stable2412/pr_5693.prdoc rename to prdoc/pr_5693.prdoc diff --git a/prdoc/stable2412/pr_5701.prdoc 
b/prdoc/pr_5701.prdoc similarity index 100% rename from prdoc/stable2412/pr_5701.prdoc rename to prdoc/pr_5701.prdoc diff --git a/prdoc/pr_5703.prdoc b/prdoc/pr_5703.prdoc deleted file mode 100644 index 3cef4468a87d..000000000000 --- a/prdoc/pr_5703.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: Properly handle block gap created by fast sync - -doc: - - audience: Node Dev - description: | - Implements support for handling block gaps generated during fast sync. This includes managing the creation, - updating, and removal of block gaps. - Note that this feature is not fully activated until the `body` attribute is removed from the `LightState` - block request in chain sync, which will occur after the issue #5406 is resolved. - -crates: - - name: sc-client-db - bump: patch diff --git a/prdoc/stable2412/pr_5707.prdoc b/prdoc/pr_5707.prdoc similarity index 100% rename from prdoc/stable2412/pr_5707.prdoc rename to prdoc/pr_5707.prdoc diff --git a/prdoc/stable2412/pr_5716.prdoc b/prdoc/pr_5716.prdoc similarity index 100% rename from prdoc/stable2412/pr_5716.prdoc rename to prdoc/pr_5716.prdoc diff --git a/prdoc/pr_5723.prdoc b/prdoc/pr_5723.prdoc deleted file mode 100644 index ded5f9cebd1d..000000000000 --- a/prdoc/pr_5723.prdoc +++ /dev/null @@ -1,24 +0,0 @@ -title: Adds `BlockNumberProvider` in multisig, proxy and nft pallets - -doc: - - audience: Runtime Dev - description: | - This PR adds the ability for these pallets to specify their source of the block number. - This is useful when these pallets are migrated from the relay chain to a parachain and - vice versa. - - This change is backwards compatible: - 1. If the `BlockNumberProvider` continues to use the system pallet's block number - 2. When a pallet deployed on the relay chain is moved to a parachain, but still uses the - relay chain's block number - - However, we would need migrations if the deployed pallets are upgraded on an existing parachain, - and the `BlockNumberProvider` uses the relay chain block number. 
- -crates: - - name: pallet-multisig - bump: major - - name: pallet-proxy - bump: major - - name: pallet-nfts - bump: major diff --git a/prdoc/pr_5724.prdoc b/prdoc/pr_5724.prdoc deleted file mode 100644 index be9d21c214a8..000000000000 --- a/prdoc/pr_5724.prdoc +++ /dev/null @@ -1,37 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Validator Re-Enabling (master PR) - -doc: - - audience: Runtime Dev - description: | - Implementation of the Stage 3 for the New Disabling Strategy: https://github.com/paritytech/polkadot-sdk/issues/4359 - - This PR changes when an active validator node gets disabled for comitting offences. - When Byzantine Threshold Validators (1/3) are already disabled instead of no longer - disabling the highest offenders will be disabled potentially re-enabling low offenders. - - - audience: Node Operator - description: | - Implementation of the Stage 3 for the New Disabling Strategy: https://github.com/paritytech/polkadot-sdk/issues/4359 - - This PR changes when an active validator node gets disabled within parachain consensus (reduced responsibilities and - reduced rewards) for comitting offences. This should not affect active validators on a day-to-day basis and will only - be relevant when the network is under attack or there is a wide spread malfunction causing slashes. In that case - lowest offenders might get eventually re-enabled (back to normal responsibilities and normal rewards). - -migrations: - db: [] - runtime: - - reference: pallet-staking - description: | - Migrating `DisabledValidators` from `Vec` to `Vec<(u32, PerBill)>` where the PerBill represents the severity - of the offence in terms of the % slash. 
- -crates: - - name: pallet-staking - bump: minor - - - name: pallet-session - bump: minor diff --git a/prdoc/stable2412/pr_5726.prdoc b/prdoc/pr_5726.prdoc similarity index 100% rename from prdoc/stable2412/pr_5726.prdoc rename to prdoc/pr_5726.prdoc diff --git a/prdoc/stable2412/pr_5737.prdoc b/prdoc/pr_5737.prdoc similarity index 100% rename from prdoc/stable2412/pr_5737.prdoc rename to prdoc/pr_5737.prdoc diff --git a/prdoc/stable2412/pr_5741.prdoc b/prdoc/pr_5741.prdoc similarity index 100% rename from prdoc/stable2412/pr_5741.prdoc rename to prdoc/pr_5741.prdoc diff --git a/prdoc/stable2412/pr_5743.prdoc b/prdoc/pr_5743.prdoc similarity index 100% rename from prdoc/stable2412/pr_5743.prdoc rename to prdoc/pr_5743.prdoc diff --git a/prdoc/stable2412/pr_5745.prdoc b/prdoc/pr_5745.prdoc similarity index 100% rename from prdoc/stable2412/pr_5745.prdoc rename to prdoc/pr_5745.prdoc diff --git a/prdoc/stable2412/pr_5756.prdoc b/prdoc/pr_5756.prdoc similarity index 100% rename from prdoc/stable2412/pr_5756.prdoc rename to prdoc/pr_5756.prdoc diff --git a/prdoc/stable2412/pr_5762.prdoc b/prdoc/pr_5762.prdoc similarity index 100% rename from prdoc/stable2412/pr_5762.prdoc rename to prdoc/pr_5762.prdoc diff --git a/prdoc/stable2412/pr_5765.prdoc b/prdoc/pr_5765.prdoc similarity index 100% rename from prdoc/stable2412/pr_5765.prdoc rename to prdoc/pr_5765.prdoc diff --git a/prdoc/stable2412/pr_5768.prdoc b/prdoc/pr_5768.prdoc similarity index 100% rename from prdoc/stable2412/pr_5768.prdoc rename to prdoc/pr_5768.prdoc diff --git a/prdoc/stable2412/pr_5774.prdoc b/prdoc/pr_5774.prdoc similarity index 100% rename from prdoc/stable2412/pr_5774.prdoc rename to prdoc/pr_5774.prdoc diff --git a/prdoc/stable2412/pr_5779.prdoc b/prdoc/pr_5779.prdoc similarity index 100% rename from prdoc/stable2412/pr_5779.prdoc rename to prdoc/pr_5779.prdoc diff --git a/prdoc/stable2412/pr_5787.prdoc b/prdoc/pr_5787.prdoc similarity index 100% rename from prdoc/stable2412/pr_5787.prdoc rename 
to prdoc/pr_5787.prdoc diff --git a/prdoc/stable2412/pr_5789.prdoc b/prdoc/pr_5789.prdoc similarity index 100% rename from prdoc/stable2412/pr_5789.prdoc rename to prdoc/pr_5789.prdoc diff --git a/prdoc/stable2412/pr_5796.prdoc b/prdoc/pr_5796.prdoc similarity index 100% rename from prdoc/stable2412/pr_5796.prdoc rename to prdoc/pr_5796.prdoc diff --git a/prdoc/stable2412/pr_5804.prdoc b/prdoc/pr_5804.prdoc similarity index 100% rename from prdoc/stable2412/pr_5804.prdoc rename to prdoc/pr_5804.prdoc diff --git a/prdoc/stable2412/pr_5807.prdoc b/prdoc/pr_5807.prdoc similarity index 100% rename from prdoc/stable2412/pr_5807.prdoc rename to prdoc/pr_5807.prdoc diff --git a/prdoc/stable2412/pr_5811.prdoc b/prdoc/pr_5811.prdoc similarity index 100% rename from prdoc/stable2412/pr_5811.prdoc rename to prdoc/pr_5811.prdoc diff --git a/prdoc/stable2412/pr_5813.prdoc b/prdoc/pr_5813.prdoc similarity index 100% rename from prdoc/stable2412/pr_5813.prdoc rename to prdoc/pr_5813.prdoc diff --git a/prdoc/stable2412/pr_5824.prdoc b/prdoc/pr_5824.prdoc similarity index 100% rename from prdoc/stable2412/pr_5824.prdoc rename to prdoc/pr_5824.prdoc diff --git a/prdoc/stable2412/pr_5830.prdoc b/prdoc/pr_5830.prdoc similarity index 100% rename from prdoc/stable2412/pr_5830.prdoc rename to prdoc/pr_5830.prdoc diff --git a/prdoc/stable2412/pr_5838.prdoc b/prdoc/pr_5838.prdoc similarity index 100% rename from prdoc/stable2412/pr_5838.prdoc rename to prdoc/pr_5838.prdoc diff --git a/prdoc/stable2412/pr_5839.prdoc b/prdoc/pr_5839.prdoc similarity index 100% rename from prdoc/stable2412/pr_5839.prdoc rename to prdoc/pr_5839.prdoc diff --git a/prdoc/pr_5842.prdoc b/prdoc/pr_5842.prdoc deleted file mode 100644 index 0175c7583419..000000000000 --- a/prdoc/pr_5842.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Get rid of libp2p dependency in 
sc-authority-discovery - -doc: - - audience: Node Dev - description: | - Removes `libp2p` types in authority-discovery, and replace them with network backend agnostic types from `sc-network-types`. - The `sc-network` interface is therefore updated accordingly. - -crates: - - name: sc-network - bump: patch - - name: sc-network-types - bump: patch - - name: sc-authority-discovery - bump: patch diff --git a/prdoc/stable2412/pr_5845.prdoc b/prdoc/pr_5845.prdoc similarity index 100% rename from prdoc/stable2412/pr_5845.prdoc rename to prdoc/pr_5845.prdoc diff --git a/prdoc/stable2412/pr_5847.prdoc b/prdoc/pr_5847.prdoc similarity index 100% rename from prdoc/stable2412/pr_5847.prdoc rename to prdoc/pr_5847.prdoc diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc deleted file mode 100644 index 7735cfee9f37..000000000000 --- a/prdoc/pr_5855.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Remove feature `test-helpers` from sc-service - -doc: - - audience: Node Dev - description: | - Removes feature `test-helpers` from sc-service. 
- -crates: - - name: sc-service - bump: major - - name: sc-rpc-spec-v2 - bump: major diff --git a/prdoc/stable2412/pr_5856.prdoc b/prdoc/pr_5856.prdoc similarity index 100% rename from prdoc/stable2412/pr_5856.prdoc rename to prdoc/pr_5856.prdoc diff --git a/prdoc/stable2412/pr_5857.prdoc b/prdoc/pr_5857.prdoc similarity index 100% rename from prdoc/stable2412/pr_5857.prdoc rename to prdoc/pr_5857.prdoc diff --git a/prdoc/stable2412/pr_5859.prdoc b/prdoc/pr_5859.prdoc similarity index 100% rename from prdoc/stable2412/pr_5859.prdoc rename to prdoc/pr_5859.prdoc diff --git a/prdoc/stable2412/pr_5861.prdoc b/prdoc/pr_5861.prdoc similarity index 100% rename from prdoc/stable2412/pr_5861.prdoc rename to prdoc/pr_5861.prdoc diff --git a/prdoc/stable2412/pr_5866.prdoc b/prdoc/pr_5866.prdoc similarity index 100% rename from prdoc/stable2412/pr_5866.prdoc rename to prdoc/pr_5866.prdoc diff --git a/prdoc/stable2412/pr_5872.prdoc b/prdoc/pr_5872.prdoc similarity index 100% rename from prdoc/stable2412/pr_5872.prdoc rename to prdoc/pr_5872.prdoc diff --git a/prdoc/stable2412/pr_5875.prdoc b/prdoc/pr_5875.prdoc similarity index 100% rename from prdoc/stable2412/pr_5875.prdoc rename to prdoc/pr_5875.prdoc diff --git a/prdoc/stable2412/pr_5876.prdoc b/prdoc/pr_5876.prdoc similarity index 100% rename from prdoc/stable2412/pr_5876.prdoc rename to prdoc/pr_5876.prdoc diff --git a/prdoc/stable2412/pr_5880.prdoc b/prdoc/pr_5880.prdoc similarity index 100% rename from prdoc/stable2412/pr_5880.prdoc rename to prdoc/pr_5880.prdoc diff --git a/prdoc/stable2412/pr_5883.prdoc b/prdoc/pr_5883.prdoc similarity index 100% rename from prdoc/stable2412/pr_5883.prdoc rename to prdoc/pr_5883.prdoc diff --git a/prdoc/stable2412/pr_5886.prdoc b/prdoc/pr_5886.prdoc similarity index 100% rename from prdoc/stable2412/pr_5886.prdoc rename to prdoc/pr_5886.prdoc diff --git a/prdoc/stable2412/pr_5888.prdoc b/prdoc/pr_5888.prdoc similarity index 100% rename from prdoc/stable2412/pr_5888.prdoc rename to 
prdoc/pr_5888.prdoc diff --git a/prdoc/stable2412/pr_5891.prdoc b/prdoc/pr_5891.prdoc similarity index 100% rename from prdoc/stable2412/pr_5891.prdoc rename to prdoc/pr_5891.prdoc diff --git a/prdoc/stable2412/pr_5892.prdoc b/prdoc/pr_5892.prdoc similarity index 100% rename from prdoc/stable2412/pr_5892.prdoc rename to prdoc/pr_5892.prdoc diff --git a/prdoc/pr_5899.prdoc b/prdoc/pr_5899.prdoc deleted file mode 100644 index fef810dd5f20..000000000000 --- a/prdoc/pr_5899.prdoc +++ /dev/null @@ -1,52 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: "Remove usage of AccountKeyring" - -doc: - - audience: Runtime Dev - description: | - Compared with AccountKeyring, Sr25519Keyring and Ed25519Keyring are more intuitive. - When both Sr25519Keyring and Ed25519Keyring are required, using AccountKeyring bring confusion. - There are two AccountKeyring definitions, it becomes more complex if export two AccountKeyring from frame. 
- -crates: - - name: frame-support - bump: patch - - name: sp-keyring - bump: major - - name: sc-service - bump: patch - - name: sc-chain-spec - bump: patch - - name: sc-rpc - bump: patch - - name: sc-transaction-pool - bump: patch - - name: sc-rpc-spec-v2 - bump: patch - - name: polkadot-node-metrics - bump: patch - - name: substrate-frame-rpc-system - bump: patch - - name: westend-runtime - bump: patch - - name: polkadot-sdk-frame - bump: patch - - name: rococo-runtime - bump: patch - - name: sc-basic-authorship - bump: patch - - name: bridge-hub-test-utils - bump: patch - - name: sc-consensus-manual-seal - bump: patch - - name: snowbridge-pallet-inbound-queue - bump: patch - - name: snowbridge-runtime-test-common - bump: patch - - name: bridge-hub-rococo-runtime - bump: patch - - name: bridge-hub-westend-runtime - bump: patch - diff --git a/prdoc/stable2412/pr_5901.prdoc b/prdoc/pr_5901.prdoc similarity index 100% rename from prdoc/stable2412/pr_5901.prdoc rename to prdoc/pr_5901.prdoc diff --git a/prdoc/stable2412/pr_5908.prdoc b/prdoc/pr_5908.prdoc similarity index 100% rename from prdoc/stable2412/pr_5908.prdoc rename to prdoc/pr_5908.prdoc diff --git a/prdoc/stable2412/pr_5911.prdoc b/prdoc/pr_5911.prdoc similarity index 100% rename from prdoc/stable2412/pr_5911.prdoc rename to prdoc/pr_5911.prdoc diff --git a/prdoc/stable2412/pr_5915.prdoc b/prdoc/pr_5915.prdoc similarity index 100% rename from prdoc/stable2412/pr_5915.prdoc rename to prdoc/pr_5915.prdoc diff --git a/prdoc/stable2412/pr_5917.prdoc b/prdoc/pr_5917.prdoc similarity index 100% rename from prdoc/stable2412/pr_5917.prdoc rename to prdoc/pr_5917.prdoc diff --git a/prdoc/stable2412/pr_5919.prdoc b/prdoc/pr_5919.prdoc similarity index 100% rename from prdoc/stable2412/pr_5919.prdoc rename to prdoc/pr_5919.prdoc diff --git a/prdoc/stable2412/pr_5924.prdoc b/prdoc/pr_5924.prdoc similarity index 100% rename from prdoc/stable2412/pr_5924.prdoc rename to prdoc/pr_5924.prdoc diff --git 
a/prdoc/stable2412/pr_5939.prdoc b/prdoc/pr_5939.prdoc similarity index 100% rename from prdoc/stable2412/pr_5939.prdoc rename to prdoc/pr_5939.prdoc diff --git a/prdoc/stable2412/pr_5941.prdoc b/prdoc/pr_5941.prdoc similarity index 100% rename from prdoc/stable2412/pr_5941.prdoc rename to prdoc/pr_5941.prdoc diff --git a/prdoc/stable2412/pr_5946.prdoc b/prdoc/pr_5946.prdoc similarity index 100% rename from prdoc/stable2412/pr_5946.prdoc rename to prdoc/pr_5946.prdoc diff --git a/prdoc/stable2412/pr_5954.prdoc b/prdoc/pr_5954.prdoc similarity index 100% rename from prdoc/stable2412/pr_5954.prdoc rename to prdoc/pr_5954.prdoc diff --git a/prdoc/stable2412/pr_5961.prdoc b/prdoc/pr_5961.prdoc similarity index 100% rename from prdoc/stable2412/pr_5961.prdoc rename to prdoc/pr_5961.prdoc diff --git a/prdoc/stable2412/pr_5971.prdoc b/prdoc/pr_5971.prdoc similarity index 100% rename from prdoc/stable2412/pr_5971.prdoc rename to prdoc/pr_5971.prdoc diff --git a/prdoc/stable2412/pr_5984.prdoc b/prdoc/pr_5984.prdoc similarity index 100% rename from prdoc/stable2412/pr_5984.prdoc rename to prdoc/pr_5984.prdoc diff --git a/prdoc/stable2412/pr_5994.prdoc b/prdoc/pr_5994.prdoc similarity index 100% rename from prdoc/stable2412/pr_5994.prdoc rename to prdoc/pr_5994.prdoc diff --git a/prdoc/stable2412/pr_5995.prdoc b/prdoc/pr_5995.prdoc similarity index 100% rename from prdoc/stable2412/pr_5995.prdoc rename to prdoc/pr_5995.prdoc diff --git a/prdoc/stable2412/pr_5998.prdoc b/prdoc/pr_5998.prdoc similarity index 100% rename from prdoc/stable2412/pr_5998.prdoc rename to prdoc/pr_5998.prdoc diff --git a/prdoc/stable2412/pr_5999.prdoc b/prdoc/pr_5999.prdoc similarity index 100% rename from prdoc/stable2412/pr_5999.prdoc rename to prdoc/pr_5999.prdoc diff --git a/prdoc/stable2412/pr_6011.prdoc b/prdoc/pr_6011.prdoc similarity index 100% rename from prdoc/stable2412/pr_6011.prdoc rename to prdoc/pr_6011.prdoc diff --git a/prdoc/stable2412/pr_6015.prdoc b/prdoc/pr_6015.prdoc similarity 
index 100% rename from prdoc/stable2412/pr_6015.prdoc rename to prdoc/pr_6015.prdoc diff --git a/prdoc/stable2412/pr_6016.prdoc b/prdoc/pr_6016.prdoc similarity index 100% rename from prdoc/stable2412/pr_6016.prdoc rename to prdoc/pr_6016.prdoc diff --git a/prdoc/stable2412/pr_6022.prdoc b/prdoc/pr_6022.prdoc similarity index 100% rename from prdoc/stable2412/pr_6022.prdoc rename to prdoc/pr_6022.prdoc diff --git a/prdoc/stable2412/pr_6023.prdoc b/prdoc/pr_6023.prdoc similarity index 100% rename from prdoc/stable2412/pr_6023.prdoc rename to prdoc/pr_6023.prdoc diff --git a/prdoc/stable2412/pr_6025.prdoc b/prdoc/pr_6025.prdoc similarity index 100% rename from prdoc/stable2412/pr_6025.prdoc rename to prdoc/pr_6025.prdoc diff --git a/prdoc/stable2412/pr_6027.prdoc b/prdoc/pr_6027.prdoc similarity index 100% rename from prdoc/stable2412/pr_6027.prdoc rename to prdoc/pr_6027.prdoc diff --git a/prdoc/stable2412/pr_6032.prdoc b/prdoc/pr_6032.prdoc similarity index 100% rename from prdoc/stable2412/pr_6032.prdoc rename to prdoc/pr_6032.prdoc diff --git a/prdoc/stable2412/pr_6039.prdoc b/prdoc/pr_6039.prdoc similarity index 100% rename from prdoc/stable2412/pr_6039.prdoc rename to prdoc/pr_6039.prdoc diff --git a/prdoc/stable2412/pr_6045.prdoc b/prdoc/pr_6045.prdoc similarity index 100% rename from prdoc/stable2412/pr_6045.prdoc rename to prdoc/pr_6045.prdoc diff --git a/prdoc/stable2412/pr_6058.prdoc b/prdoc/pr_6058.prdoc similarity index 100% rename from prdoc/stable2412/pr_6058.prdoc rename to prdoc/pr_6058.prdoc diff --git a/prdoc/stable2412/pr_6061.prdoc b/prdoc/pr_6061.prdoc similarity index 100% rename from prdoc/stable2412/pr_6061.prdoc rename to prdoc/pr_6061.prdoc diff --git a/prdoc/stable2412/pr_6073.prdoc b/prdoc/pr_6073.prdoc similarity index 100% rename from prdoc/stable2412/pr_6073.prdoc rename to prdoc/pr_6073.prdoc diff --git a/prdoc/stable2412/pr_6077.prdoc b/prdoc/pr_6077.prdoc similarity index 100% rename from prdoc/stable2412/pr_6077.prdoc rename to 
prdoc/pr_6077.prdoc diff --git a/prdoc/stable2412/pr_6080.prdoc b/prdoc/pr_6080.prdoc similarity index 100% rename from prdoc/stable2412/pr_6080.prdoc rename to prdoc/pr_6080.prdoc diff --git a/prdoc/stable2412/pr_6087.prdoc b/prdoc/pr_6087.prdoc similarity index 100% rename from prdoc/stable2412/pr_6087.prdoc rename to prdoc/pr_6087.prdoc diff --git a/prdoc/stable2412/pr_6088.prdoc b/prdoc/pr_6088.prdoc similarity index 100% rename from prdoc/stable2412/pr_6088.prdoc rename to prdoc/pr_6088.prdoc diff --git a/prdoc/stable2412/pr_6094.prdoc b/prdoc/pr_6094.prdoc similarity index 100% rename from prdoc/stable2412/pr_6094.prdoc rename to prdoc/pr_6094.prdoc diff --git a/prdoc/stable2412/pr_6096.prdoc b/prdoc/pr_6096.prdoc similarity index 100% rename from prdoc/stable2412/pr_6096.prdoc rename to prdoc/pr_6096.prdoc diff --git a/prdoc/stable2412/pr_6104.prdoc b/prdoc/pr_6104.prdoc similarity index 100% rename from prdoc/stable2412/pr_6104.prdoc rename to prdoc/pr_6104.prdoc diff --git a/prdoc/stable2412/pr_6105.prdoc b/prdoc/pr_6105.prdoc similarity index 100% rename from prdoc/stable2412/pr_6105.prdoc rename to prdoc/pr_6105.prdoc diff --git a/prdoc/pr_6111.prdoc b/prdoc/pr_6111.prdoc deleted file mode 100644 index 4ada3031c805..000000000000 --- a/prdoc/pr_6111.prdoc +++ /dev/null @@ -1,17 +0,0 @@ -title: "[pallet-revive] Update delegate_call to accept address and weight" - -doc: - - audience: Runtime Dev - description: | - Enhance the `delegate_call` function to accept an `address` target parameter instead of a `code_hash`. - This allows direct identification of the target contract using the provided address. - Additionally, introduce parameters for specifying a customizable `ref_time` limit and `proof_size` limit, - thereby improving flexibility and control during contract interactions. 
- -crates: - - name: pallet-revive - bump: major - - name: pallet-revive-fixtures - bump: patch - - name: pallet-revive-uapi - bump: major diff --git a/prdoc/stable2412/pr_6129.prdoc b/prdoc/pr_6129.prdoc similarity index 100% rename from prdoc/stable2412/pr_6129.prdoc rename to prdoc/pr_6129.prdoc diff --git a/prdoc/stable2412/pr_6141.prdoc b/prdoc/pr_6141.prdoc similarity index 100% rename from prdoc/stable2412/pr_6141.prdoc rename to prdoc/pr_6141.prdoc diff --git a/prdoc/stable2412/pr_6147.prdoc b/prdoc/pr_6147.prdoc similarity index 100% rename from prdoc/stable2412/pr_6147.prdoc rename to prdoc/pr_6147.prdoc diff --git a/prdoc/stable2412/pr_6148.prdoc b/prdoc/pr_6148.prdoc similarity index 100% rename from prdoc/stable2412/pr_6148.prdoc rename to prdoc/pr_6148.prdoc diff --git a/prdoc/stable2412/pr_6156.prdoc b/prdoc/pr_6156.prdoc similarity index 100% rename from prdoc/stable2412/pr_6156.prdoc rename to prdoc/pr_6156.prdoc diff --git a/prdoc/stable2412/pr_6169.prdoc b/prdoc/pr_6169.prdoc similarity index 100% rename from prdoc/stable2412/pr_6169.prdoc rename to prdoc/pr_6169.prdoc diff --git a/prdoc/stable2412/pr_6171.prdoc b/prdoc/pr_6171.prdoc similarity index 100% rename from prdoc/stable2412/pr_6171.prdoc rename to prdoc/pr_6171.prdoc diff --git a/prdoc/stable2412/pr_6174.prdoc b/prdoc/pr_6174.prdoc similarity index 100% rename from prdoc/stable2412/pr_6174.prdoc rename to prdoc/pr_6174.prdoc diff --git a/prdoc/pr_6184.prdoc b/prdoc/pr_6184.prdoc deleted file mode 100644 index e05a5884e930..000000000000 --- a/prdoc/pr_6184.prdoc +++ /dev/null @@ -1,24 +0,0 @@ -title: Remove pallet::getter from pallet-staking -doc: - - audience: Runtime Dev - description: | - This PR removes all pallet::getter occurrences from pallet-staking and replaces them with explicit implementations. - It also adds tests to verify that retrieval of affected entities works as expected so via storage::getter. 
- -crates: - - name: pallet-babe - bump: patch - - name: pallet-beefy - bump: patch - - name: pallet-election-provider-multi-phase - bump: patch - - name: pallet-grandpa - bump: patch - - name: pallet-nomination-pools - bump: patch - - name: pallet-root-offences - bump: patch - - name: westend-runtime - bump: patch - - name: pallet-staking - bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6187.prdoc b/prdoc/pr_6187.prdoc similarity index 100% rename from prdoc/stable2412/pr_6187.prdoc rename to prdoc/pr_6187.prdoc diff --git a/prdoc/stable2412/pr_6192.prdoc b/prdoc/pr_6192.prdoc similarity index 100% rename from prdoc/stable2412/pr_6192.prdoc rename to prdoc/pr_6192.prdoc diff --git a/prdoc/stable2412/pr_6205.prdoc b/prdoc/pr_6205.prdoc similarity index 100% rename from prdoc/stable2412/pr_6205.prdoc rename to prdoc/pr_6205.prdoc diff --git a/prdoc/stable2412/pr_6212.prdoc b/prdoc/pr_6212.prdoc similarity index 100% rename from prdoc/stable2412/pr_6212.prdoc rename to prdoc/pr_6212.prdoc diff --git a/prdoc/stable2412/pr_6214.prdoc b/prdoc/pr_6214.prdoc similarity index 100% rename from prdoc/stable2412/pr_6214.prdoc rename to prdoc/pr_6214.prdoc diff --git a/prdoc/pr_6215.prdoc b/prdoc/pr_6215.prdoc deleted file mode 100644 index 3726a2fc5788..000000000000 --- a/prdoc/pr_6215.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Remove `ProspectiveParachainsMode` from backing subsystem -doc: - - audience: "Node Dev" - description: | - Removes `ProspectiveParachainsMode` usage from the backing subsystem and assumes - `async_backing_params` runtime api is always available. Since the runtime api v7 is released on - all networks it should always be true. 
- -crates: - - name: polkadot-node-core-backing - bump: patch - - name: polkadot-statement-table - bump: major diff --git a/prdoc/stable2412/pr_6217.prdoc b/prdoc/pr_6217.prdoc similarity index 100% rename from prdoc/stable2412/pr_6217.prdoc rename to prdoc/pr_6217.prdoc diff --git a/prdoc/stable2412/pr_6218.prdoc b/prdoc/pr_6218.prdoc similarity index 100% rename from prdoc/stable2412/pr_6218.prdoc rename to prdoc/pr_6218.prdoc diff --git a/prdoc/pr_6220.prdoc b/prdoc/pr_6220.prdoc deleted file mode 100644 index 6a5ee4fa59be..000000000000 --- a/prdoc/pr_6220.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Fix metrics not shutting down if there are open connections - -doc: - - audience: Runtime Dev - description: | - Fix prometheus metrics not shutting down if there are open connections - -crates: -- name: substrate-prometheus-endpoint - bump: patch diff --git a/prdoc/stable2412/pr_6221.prdoc b/prdoc/pr_6221.prdoc similarity index 100% rename from prdoc/stable2412/pr_6221.prdoc rename to prdoc/pr_6221.prdoc diff --git a/prdoc/stable2412/pr_6228.prdoc b/prdoc/pr_6228.prdoc similarity index 100% rename from prdoc/stable2412/pr_6228.prdoc rename to prdoc/pr_6228.prdoc diff --git a/prdoc/stable2412/pr_6246.prdoc b/prdoc/pr_6246.prdoc similarity index 100% rename from prdoc/stable2412/pr_6246.prdoc rename to prdoc/pr_6246.prdoc diff --git a/prdoc/pr_6248.prdoc b/prdoc/pr_6248.prdoc deleted file mode 100644 index 71fb0891cac6..000000000000 --- a/prdoc/pr_6248.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: Upgrade libp2p to 0.54.1 - -doc: - - audience: [Node Dev, Node Operator] - description: | - Upgrade libp2p from 0.52.4 to 0.54.1 - -crates: - - name: sc-network - bump: major - - name: sc-network-types - bump: minor - - name: sc-network-sync - bump: patch - - name: sc-telemetry - bump: minor diff --git a/prdoc/pr_6249.prdoc b/prdoc/pr_6249.prdoc deleted file mode 100644 index 52fa10b22627..000000000000 --- a/prdoc/pr_6249.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Pure 
state sync refactoring (part-1) - -doc: -- audience: Node Dev - description: | - The pure refactoring of state sync is preparing for https://github.com/paritytech/polkadot-sdk/issues/4. This is the first part, focusing on isolating the function `process_state_key_values()` as the central point for storing received state data in memory. This function will later be adapted to forward the state data directly to the DB layer to resolve the OOM issue and support persistent state sync. - -crates: -- name: sc-network-sync - bump: none diff --git a/prdoc/stable2412/pr_6255.prdoc b/prdoc/pr_6255.prdoc similarity index 100% rename from prdoc/stable2412/pr_6255.prdoc rename to prdoc/pr_6255.prdoc diff --git a/prdoc/stable2412/pr_6257.prdoc b/prdoc/pr_6257.prdoc similarity index 100% rename from prdoc/stable2412/pr_6257.prdoc rename to prdoc/pr_6257.prdoc diff --git a/prdoc/stable2412/pr_6260.prdoc b/prdoc/pr_6260.prdoc similarity index 100% rename from prdoc/stable2412/pr_6260.prdoc rename to prdoc/pr_6260.prdoc diff --git a/prdoc/stable2412/pr_6261.prdoc b/prdoc/pr_6261.prdoc similarity index 100% rename from prdoc/stable2412/pr_6261.prdoc rename to prdoc/pr_6261.prdoc diff --git a/prdoc/pr_6262.prdoc b/prdoc/pr_6262.prdoc deleted file mode 100644 index 8ad99bc6ad28..000000000000 --- a/prdoc/pr_6262.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: "Size limits implemented for fork aware transaction pool" - -doc: - - audience: Node Dev - description: | - Size limits are now obeyed in fork aware transaction pool - -crates: - - name: sc-transaction-pool - bump: minor diff --git a/prdoc/stable2412/pr_6263.prdoc b/prdoc/pr_6263.prdoc similarity index 100% rename from prdoc/stable2412/pr_6263.prdoc rename to prdoc/pr_6263.prdoc diff --git a/prdoc/stable2412/pr_6264.prdoc b/prdoc/pr_6264.prdoc similarity index 100% rename from prdoc/stable2412/pr_6264.prdoc rename to prdoc/pr_6264.prdoc diff --git a/prdoc/stable2412/pr_6268.prdoc b/prdoc/pr_6268.prdoc similarity index 100% rename from 
prdoc/stable2412/pr_6268.prdoc rename to prdoc/pr_6268.prdoc diff --git a/prdoc/stable2412/pr_6278.prdoc b/prdoc/pr_6278.prdoc similarity index 100% rename from prdoc/stable2412/pr_6278.prdoc rename to prdoc/pr_6278.prdoc diff --git a/prdoc/pr_6284.prdoc b/prdoc/pr_6284.prdoc deleted file mode 100644 index e2d9ebb526d2..000000000000 --- a/prdoc/pr_6284.prdoc +++ /dev/null @@ -1,22 +0,0 @@ -title: "backing: improve session buffering for runtime information" - -doc: - - audience: Node Dev - description: | - This PR implements caching within the backing module for session-stable information, - reducing redundant runtime API calls. - - Specifically, it introduces a local cache for the: - - validators list; - - node features; - - executor parameters; - - minimum backing votes threshold; - - validator-to-group mapping. - - Previously, this data was fetched or computed repeatedly each time `PerRelayParentState` - was built. With this update, the cached information is fetched once and reused throughout - the session. 
- -crates: - - name: polkadot-node-core-backing - bump: patch diff --git a/prdoc/stable2412/pr_6288.prdoc b/prdoc/pr_6288.prdoc similarity index 100% rename from prdoc/stable2412/pr_6288.prdoc rename to prdoc/pr_6288.prdoc diff --git a/prdoc/pr_6290.prdoc b/prdoc/pr_6290.prdoc deleted file mode 100644 index a05d0cd15acf..000000000000 --- a/prdoc/pr_6290.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: Migrate pallet-transaction-storage and pallet-indices to benchmark v2 -doc: -- audience: Runtime Dev - description: |- - Part of: - #6202 -crates: -- name: pallet-indices - bump: patch -- name: pallet-transaction-storage - bump: patch diff --git a/prdoc/stable2412/pr_6291.prdoc b/prdoc/pr_6291.prdoc similarity index 100% rename from prdoc/stable2412/pr_6291.prdoc rename to prdoc/pr_6291.prdoc diff --git a/prdoc/stable2412/pr_6295.prdoc b/prdoc/pr_6295.prdoc similarity index 100% rename from prdoc/stable2412/pr_6295.prdoc rename to prdoc/pr_6295.prdoc diff --git a/prdoc/stable2412/pr_6296.prdoc b/prdoc/pr_6296.prdoc similarity index 100% rename from prdoc/stable2412/pr_6296.prdoc rename to prdoc/pr_6296.prdoc diff --git a/prdoc/stable2412/pr_6298.prdoc b/prdoc/pr_6298.prdoc similarity index 100% rename from prdoc/stable2412/pr_6298.prdoc rename to prdoc/pr_6298.prdoc diff --git a/prdoc/stable2412/pr_6299.prdoc b/prdoc/pr_6299.prdoc similarity index 100% rename from prdoc/stable2412/pr_6299.prdoc rename to prdoc/pr_6299.prdoc diff --git a/prdoc/pr_6301.prdoc b/prdoc/pr_6301.prdoc deleted file mode 100644 index d4c05c17c8fb..000000000000 --- a/prdoc/pr_6301.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: migrate pallet-nft-fractionalization to benchmarking v2 syntax -doc: -- audience: Runtime Dev - description: |- - Migrates pallet-nft-fractionalization to benchmarking v2 syntax. 
- - Part of: - * #6202 -crates: -- name: pallet-nft-fractionalization - bump: patch diff --git a/prdoc/pr_6302.prdoc b/prdoc/pr_6302.prdoc deleted file mode 100644 index 8b3e0964b6a6..000000000000 --- a/prdoc/pr_6302.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: migrate pallet-nomination-pool-benchmarking to benchmarking syntax v2 -doc: -- audience: Runtime Dev - description: |- - migrate pallet-nomination-pool-benchmarking to benchmarking syntax v2 -crates: -- name: pallet-nomination-pools-benchmarking - bump: patch diff --git a/prdoc/stable2412/pr_6305.prdoc b/prdoc/pr_6305.prdoc similarity index 100% rename from prdoc/stable2412/pr_6305.prdoc rename to prdoc/pr_6305.prdoc diff --git a/prdoc/pr_6310.prdoc b/prdoc/pr_6310.prdoc deleted file mode 100644 index ab421791dc72..000000000000 --- a/prdoc/pr_6310.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: Migrate pallet-child-bounties benchmark to v2 -doc: -- audience: Runtime Dev - description: |- - Part of: - - - #6202. -crates: -- name: pallet-utility - bump: patch -- name: pallet-child-bounties - bump: patch diff --git a/prdoc/pr_6311.prdoc b/prdoc/pr_6311.prdoc deleted file mode 100644 index a63876f4e4ac..000000000000 --- a/prdoc/pr_6311.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Migrate pallet-fast-unstake and pallet-babe benchmark to v2 -doc: -- audience: Runtime Dev - description: |- - Migrate pallet-fast-unstake and pallet-babe benchmark to v2 -crates: -- name: pallet-babe - bump: patch -- name: pallet-fast-unstake - bump: patch diff --git a/prdoc/stable2412/pr_6314.prdoc b/prdoc/pr_6314.prdoc similarity index 100% rename from prdoc/stable2412/pr_6314.prdoc rename to prdoc/pr_6314.prdoc diff --git a/prdoc/stable2412/pr_6315.prdoc b/prdoc/pr_6315.prdoc similarity index 100% rename from prdoc/stable2412/pr_6315.prdoc rename to prdoc/pr_6315.prdoc diff --git 
a/prdoc/stable2412/pr_6316.prdoc b/prdoc/pr_6316.prdoc similarity index 100% rename from prdoc/stable2412/pr_6316.prdoc rename to prdoc/pr_6316.prdoc diff --git a/prdoc/stable2412/pr_6317.prdoc b/prdoc/pr_6317.prdoc similarity index 100% rename from prdoc/stable2412/pr_6317.prdoc rename to prdoc/pr_6317.prdoc diff --git a/prdoc/stable2412/pr_6318.prdoc b/prdoc/pr_6318.prdoc similarity index 100% rename from prdoc/stable2412/pr_6318.prdoc rename to prdoc/pr_6318.prdoc diff --git a/prdoc/stable2412/pr_6337.prdoc b/prdoc/pr_6337.prdoc similarity index 100% rename from prdoc/stable2412/pr_6337.prdoc rename to prdoc/pr_6337.prdoc diff --git a/prdoc/pr_6349.prdoc b/prdoc/pr_6349.prdoc deleted file mode 100644 index 40f02712c99a..000000000000 --- a/prdoc/pr_6349.prdoc +++ /dev/null @@ -1,44 +0,0 @@ -title: "runtimes: presets are provided as config patches" - -doc: - - audience: Runtime Dev - description: | - This PR introduces usage of build_struct_json_patch macro in all - runtimes (also guides) within the code base. It also fixes macro to support - field init shorthand, and Struct Update syntax which were missing in original - implementation. 
- -crates: - - name: frame-support - bump: major - - - name: westend-runtime - bump: patch - - - name: rococo-runtime - bump: patch - - - name: asset-hub-westend-runtime - bump: patch - - - name: bridge-hub-rococo-runtime - bump: patch - - - name: bridge-hub-westend-runtime - bump: patch - - - name: collectives-westend-runtime - bump: patch - - - name: minimal-template-runtime - bump: patch - - - name: solochain-template-runtime - bump: patch - - - name: parachain-template-runtime - bump: patch - - - name: polkadot-sdk-docs-first-runtime - bump: patch - diff --git a/prdoc/stable2412/pr_6353.prdoc b/prdoc/pr_6353.prdoc similarity index 100% rename from prdoc/stable2412/pr_6353.prdoc rename to prdoc/pr_6353.prdoc diff --git a/prdoc/stable2412/pr_6357.prdoc b/prdoc/pr_6357.prdoc similarity index 100% rename from prdoc/stable2412/pr_6357.prdoc rename to prdoc/pr_6357.prdoc diff --git a/prdoc/stable2412/pr_6360.prdoc b/prdoc/pr_6360.prdoc similarity index 100% rename from prdoc/stable2412/pr_6360.prdoc rename to prdoc/pr_6360.prdoc diff --git a/prdoc/stable2412/pr_6365.prdoc b/prdoc/pr_6365.prdoc similarity index 100% rename from prdoc/stable2412/pr_6365.prdoc rename to prdoc/pr_6365.prdoc diff --git a/prdoc/pr_6367.prdoc b/prdoc/pr_6367.prdoc deleted file mode 100644 index fd1e6bb4196d..000000000000 --- a/prdoc/pr_6367.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Refactor pallet society - -doc: - - audience: Runtime Dev - description: | - Derives `MaxEncodedLen` implementation for stored types and removes `without_storage_info` attribute. - Migrates benchmarks from v1 to v2 API. 
- -crates: - - name: pallet-society - bump: minor diff --git a/prdoc/pr_6368.prdoc b/prdoc/pr_6368.prdoc deleted file mode 100644 index 4fd3963eb05e..000000000000 --- a/prdoc/pr_6368.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: Migrate inclusion benchmark to v2 -doc: -- audience: Runtime Dev - description: Migrate inclusion benchmark to v2. -crates: -- name: polkadot-runtime-parachains - bump: patch diff --git a/prdoc/stable2412/pr_6373.prdoc b/prdoc/pr_6373.prdoc similarity index 100% rename from prdoc/stable2412/pr_6373.prdoc rename to prdoc/pr_6373.prdoc diff --git a/prdoc/stable2412/pr_6380.prdoc b/prdoc/pr_6380.prdoc similarity index 100% rename from prdoc/stable2412/pr_6380.prdoc rename to prdoc/pr_6380.prdoc diff --git a/prdoc/stable2412/pr_6382.prdoc b/prdoc/pr_6382.prdoc similarity index 100% rename from prdoc/stable2412/pr_6382.prdoc rename to prdoc/pr_6382.prdoc diff --git a/prdoc/stable2412/pr_6384.prdoc b/prdoc/pr_6384.prdoc similarity index 100% rename from prdoc/stable2412/pr_6384.prdoc rename to prdoc/pr_6384.prdoc diff --git a/prdoc/pr_6393.prdoc b/prdoc/pr_6393.prdoc deleted file mode 100644 index fc8fe9bd8576..000000000000 --- a/prdoc/pr_6393.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: '[pallet-revive] adjust fee dry-run calculation' -doc: -- audience: Runtime Dev - description: |- - - Fix bare_eth_transact so that it estimate more precisely the transaction fee - - Add some context to the build.rs to make it easier to troubleshoot errors - - Add TransactionBuilder for the RPC tests. 
- - Tweaked some error message, We will need to wait for the next subxt release to properly downcast some errors and - adopt MM error code (https://eips.ethereum.org/EIPS/eip-1474#error-codes) -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-fixtures - bump: minor diff --git a/prdoc/pr_6400.prdoc b/prdoc/pr_6400.prdoc deleted file mode 100644 index a29ad49b4e51..000000000000 --- a/prdoc/pr_6400.prdoc +++ /dev/null @@ -1,41 +0,0 @@ -title: Remove network starter that is no longer needed -doc: -- audience: Node Dev - description: |- - # Description - - This seems to be an old artifact of the long closed https://github.com/paritytech/substrate/issues/6827 that I noticed when working on related code earlier. - - ## Integration - - `NetworkStarter` was removed, simply remove its usage: - ```diff - -let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - +let (network, system_rpc_tx, tx_handler_controller, sync_service) = - build_network(BuildNetworkParams { - ... - -start_network.start_network(); - ``` - - ## Review Notes - - Changes are trivial, the only reason for this to not be accepted is if it is desired to not start network automatically for whatever reason, in which case the description of network starter needs to change. - - # Checklist - - * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above. - * [ ] My PR follows the [labeling requirements]( - https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process - ) of this project (at minimum one label for `T` required) - * External contributors: ask maintainers to put the right label on your PR. 
-crates: -- name: cumulus-relay-chain-minimal-node - bump: major -- name: cumulus-client-service - bump: major -- name: polkadot-omni-node-lib - bump: major -- name: polkadot-service - bump: major -- name: sc-service - bump: major diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc deleted file mode 100644 index 9e4e0b3c6c20..000000000000 --- a/prdoc/pr_6405.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: '`fatxpool`: handling limits and priorities improvements' -doc: -- audience: Node Dev - description: |- - This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool. - -crates: -- name: sc-transaction-pool - bump: major diff --git a/prdoc/stable2412/pr_6406.prdoc b/prdoc/pr_6406.prdoc similarity index 100% rename from prdoc/stable2412/pr_6406.prdoc rename to prdoc/pr_6406.prdoc diff --git a/prdoc/pr_6411.prdoc b/prdoc/pr_6411.prdoc deleted file mode 100644 index 3d8c2219e90e..000000000000 --- a/prdoc/pr_6411.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: "Support more types in TypeWithDefault" - -doc: - - audience: Runtime Dev - description: | - This PR supports more integer types to be used with `TypeWithDefault` and makes `TypeWithDefault: BaseArithmetic` satisfied - -crates: - - name: sp-runtime - bump: patch diff --git a/prdoc/pr_6417.prdoc b/prdoc/pr_6417.prdoc deleted file mode 100644 index dfbc8c0d311b..000000000000 --- a/prdoc/pr_6417.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: fix prospective-parachains best backable chain reversion bug -doc: - - audience: Node Dev - description: | - Fixes a bug in the prospective-parachains subsystem that prevented proper best backable chain reorg. 
- -crates: -- name: polkadot-node-core-prospective-parachains - bump: patch diff --git a/prdoc/pr_6419.prdoc b/prdoc/pr_6419.prdoc deleted file mode 100644 index 6cc155d64b91..000000000000 --- a/prdoc/pr_6419.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Use the custom target riscv32emac-unknown-none-polkavm -doc: - - audience: Runtime Dev - description: | - Closes: https://github.com/paritytech/polkadot-sdk/issues/6335 - -crates: -- name: substrate-wasm-builder - bump: patch diff --git a/prdoc/pr_6425.prdoc b/prdoc/pr_6425.prdoc deleted file mode 100644 index 57e759bf3376..000000000000 --- a/prdoc/pr_6425.prdoc +++ /dev/null @@ -1,27 +0,0 @@ -title: Introduce `ConstUint` to make dependent types in `DefaultConfig` more adaptable -author: conr2d -topic: runtime - -doc: -- audience: Runtime Dev - description: |- - Introduce `ConstUint` that is a unified alternative to `ConstU8`, `ConstU16`, and - similar types, particularly useful for configuring `DefaultConfig` in pallets. - It enables configuring the underlying integer for a specific type without the need - to update all dependent types, offering enhanced flexibility in type management. - -crates: - - name: frame-support - bump: patch - - name: frame-system - bump: none - - name: pallet-assets - bump: none - - name: pallet-balances - bump: none - - name: pallet-timestamp - bump: none - - name: sp-core - bump: patch - - name: sp-runtime - bump: patch diff --git a/prdoc/pr_6435.prdoc b/prdoc/pr_6435.prdoc deleted file mode 100644 index 025c666d9115..000000000000 --- a/prdoc/pr_6435.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: 'frame-benchmarking: Use correct components for pallet instances' -doc: -- audience: Runtime Dev - description: |- - When benchmarking multiple instances of the same pallet, each instance was executed with the components of all instances. 
While actually each instance should only be executed with the components generated for the particular instance. The problem here was that in the runtime only the pallet-name was used to determine if a certain pallet should be benchmarked. When using instances, the pallet name is the same for both of these instances. The solution is to also take the instance name into account. - - The fix requires to change the `Benchmark` runtime api to also take the `instance`. The node side is written in a backwards compatible way to also support runtimes which do not yet support the `instance` parameter. -crates: -- name: frame-benchmarking - bump: major -- name: frame-benchmarking-cli - bump: major -- name: sc-client-db - bump: none -- name: pallet-referenda - bump: none diff --git a/prdoc/pr_6439.prdoc b/prdoc/pr_6439.prdoc deleted file mode 100644 index fb3b62523576..000000000000 --- a/prdoc/pr_6439.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: 'pallet-membership: Do not verify the `MembershipChanged` in bechmarks' -doc: -- audience: Runtime Dev - description: |- - There is no need to verify in the `pallet-membership` benchmark that the `MemembershipChanged` implementation works as the pallet thinks it should work. If you for example set it to `()`, `get_prime()` will always return `None`. - - TLDR: Remove the checks of `MembershipChanged` in the benchmarks to support any kind of implementation. 
-crates: -- name: pallet-membership - bump: patch diff --git a/prdoc/pr_6440.prdoc b/prdoc/pr_6440.prdoc deleted file mode 100644 index 376e59fa752e..000000000000 --- a/prdoc/pr_6440.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: Remove debug message about pruning active leaves -doc: -- audience: Node Dev - description: |- - Removed useless debug message -crates: -- name: polkadot-node-core-pvf - validate: false diff --git a/prdoc/pr_6446.prdoc b/prdoc/pr_6446.prdoc deleted file mode 100644 index 3bfe7d0c7a60..000000000000 --- a/prdoc/pr_6446.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: Make pallet-recovery supports `BlockNumberProvider` -doc: -- audience: Runtime Dev - description: |- - pallet-recovery now allows configuring the block provider to be utilized within this pallet. This block is employed for the delay in the recovery process. - - A new associated type has been introduced in the `Config` trait: `BlockNumberProvider`. This can be assigned to `System` to maintain the previous behavior, or it can be set to another block number provider, such as `RelayChain`. - - If the block provider is configured with a value different from `System`, a migration will be necessary for the `Recoverable` and `ActiveRecoveries` storage items. 
-crates: -- name: rococo-runtime - bump: major -- name: westend-runtime - bump: major -- name: pallet-recovery - bump: major diff --git a/prdoc/pr_6450.prdoc b/prdoc/pr_6450.prdoc deleted file mode 100644 index a9e927e45106..000000000000 --- a/prdoc/pr_6450.prdoc +++ /dev/null @@ -1,21 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Add omni-node checks for runtime parachain compatibility - -doc: - - audience: [ Node Dev, Runtime Dev ] - description: | - OmniNode parses runtime metadata and checks against the existence of `cumulus-pallet-parachain-system` - and `frame-system`, by filtering pallets by names: `ParachainSystem` and `System`. It also checks the - `frame-system` pallet storage `Number` type, and then uses it to configure AURA if `u32` or `u64`. - -crates: - - name: polkadot-omni-node-lib - bump: minor - - name: polkadot-sdk - bump: minor - - name: sc-runtime-utilities - bump: patch - - name: frame-benchmarking-cli - bump: major diff --git a/prdoc/pr_6452.prdoc b/prdoc/pr_6452.prdoc deleted file mode 100644 index f2cb69875e95..000000000000 --- a/prdoc/pr_6452.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: "elastic scaling RFC 103 end-to-end tests" - -doc: - - audience: [Node Dev, Runtime Dev] - description: | - Adds end-to-end zombienet-sdk tests for elastic scaling using the RFC103 implementation. - Only notable user-facing change is that the default chain configurations of westend and rococo - now enable by default the CandidateReceiptV2 node feature. 
- -crates: - - name: westend-runtime - bump: patch - - name: rococo-runtime - bump: patch - - name: rococo-parachain-runtime - bump: patch diff --git a/prdoc/pr_6453.prdoc b/prdoc/pr_6453.prdoc deleted file mode 100644 index 5df44f11296d..000000000000 --- a/prdoc/pr_6453.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: '[pallet-revive] breakdown integration tests' -doc: -- audience: Runtime Dev - description: Break down the single integration tests into multiple tests, use keccak-256 for tx.hash -crates: -- name: pallet-revive-eth-rpc - bump: minor diff --git a/prdoc/pr_6455.prdoc b/prdoc/pr_6455.prdoc deleted file mode 100644 index 9a83048e2fd2..000000000000 --- a/prdoc/pr_6455.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: Add litep2p network protocol benches -doc: -- audience: Node Dev - description: |- - Adds networking protocol benchmarks with litep2p backend -crates: -- name: sc-network - validate: false diff --git a/prdoc/pr_6459.prdoc b/prdoc/pr_6459.prdoc deleted file mode 100644 index 592ba4c6b29d..000000000000 --- a/prdoc/pr_6459.prdoc +++ /dev/null @@ -1,22 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Fix version conversion in XcmPaymentApi::query_weight_to_asset_fee. - -doc: - - audience: Runtime Dev - description: | - The `query_weight_to_asset_fee` function of the `XcmPaymentApi` was trying - to convert versions in the wrong way. - This resulted in all calls made with lower versions failing. - The version conversion is now done correctly and these same calls will now succeed. 
- -crates: - - name: asset-hub-westend-runtime - bump: patch - - name: asset-hub-rococo-runtime - bump: patch - - name: xcm-runtime-apis - bump: patch - - name: assets-common - bump: patch diff --git a/prdoc/pr_6460.prdoc b/prdoc/pr_6460.prdoc deleted file mode 100644 index e1fd1a740228..000000000000 --- a/prdoc/pr_6460.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: '[pallet-revive] set logs_bloom' -doc: -- audience: Runtime Dev - description: Set the logs_bloom in the transaction receipt -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor diff --git a/prdoc/pr_6461.prdoc b/prdoc/pr_6461.prdoc deleted file mode 100644 index 1b3d1e8b0364..000000000000 --- a/prdoc/pr_6461.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json -title: '[pallet-revive] add support for all eth tx types' -doc: -- audience: Runtime Dev - description: Add support for 1559, 4844, and 2930 transaction types -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor - diff --git a/prdoc/pr_6463.prdoc b/prdoc/pr_6463.prdoc deleted file mode 100644 index 9c4787540a49..000000000000 --- a/prdoc/pr_6463.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: Fix staking benchmark -doc: -- audience: Runtime Dev - description: 'Fix staking benchmark, error was introduced when migrating to v2: - https://github.com/paritytech/polkadot-sdk/pull/6025' -crates: -- name: pallet-staking - bump: patch diff --git a/prdoc/pr_6466.prdoc b/prdoc/pr_6466.prdoc deleted file mode 100644 index 0faa6afc8005..000000000000 --- a/prdoc/pr_6466.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: '[pallet-revive] add piggy-bank sol example' -doc: -- audience: Runtime Dev - description: |- - This PR update the pallet to use the EVM 18 decimal balance in contracts call and host functions instead of the native balance. 
- - It also updates the js example to add the piggy-bank solidity contract that expose the problem -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor diff --git a/prdoc/pr_6481.prdoc b/prdoc/pr_6481.prdoc deleted file mode 100644 index 83ba0a32eb24..000000000000 --- a/prdoc/pr_6481.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: 'slot-based-collator: Implement dedicated block import' -doc: -- audience: Node Dev - description: |- - The `SlotBasedBlockImport` job is to collect the storage proofs of all blocks getting imported. These storage proofs alongside the block are being forwarded to the collation task. Right now they are just being thrown away. More logic will follow later. Basically this will be required to include multiple blocks into one `PoV` which will then be done by the collation task. -crates: -- name: cumulus-client-consensus-aura - bump: major -- name: polkadot-omni-node-lib - bump: major diff --git a/prdoc/pr_6486.prdoc b/prdoc/pr_6486.prdoc deleted file mode 100644 index e401d3f9a887..000000000000 --- a/prdoc/pr_6486.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: "sp-trie: minor fix to avoid panic on badly-constructed proof" - -doc: - - audience: ["Runtime Dev", "Runtime User"] - description: | - "Added a check when decoding encoded proof nodes in `sp-trie` to avoid panicking when receiving a badly constructed proof, instead erroring out." - -crates: -- name: sp-trie - bump: patch diff --git a/prdoc/pr_6502.prdoc b/prdoc/pr_6502.prdoc deleted file mode 100644 index 3e2467ed5524..000000000000 --- a/prdoc/pr_6502.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: "sp-trie: correctly avoid panicking when decoding bad compact proofs" - -doc: - - audience: "Runtime Dev" - description: | - "Fixed the check introduced in [PR #6486](https://github.com/paritytech/polkadot-sdk/pull/6486). Now `sp-trie` correctly avoids panicking when decoding bad compact proofs." 
- -crates: -- name: sp-trie - bump: patch diff --git a/prdoc/pr_6503.prdoc b/prdoc/pr_6503.prdoc deleted file mode 100644 index dc296a93f0eb..000000000000 --- a/prdoc/pr_6503.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: "xcm: minor fix for compatibility with V4" - -doc: - - audience: ["Runtime Dev", "Runtime User"] - description: | - Following the removal of `Rococo`, `Westend` and `Wococo` from `NetworkId`, fixed `xcm::v5::NetworkId` encoding/decoding to be compatible with `xcm::v4::NetworkId` - -crates: -- name: staging-xcm - bump: patch diff --git a/prdoc/pr_6506.prdoc b/prdoc/pr_6506.prdoc deleted file mode 100644 index 7c6164a9959a..000000000000 --- a/prdoc/pr_6506.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Zero refund check for FungibleAdapter -doc: -- audience: Runtime User - description: |- - `FungibleAdapter` will now check if the _refund amount_ is zero before calling deposit & emitting an event. - - Fixes https://github.com/paritytech/polkadot-sdk/issues/6469. -crates: -- name: pallet-transaction-payment - bump: patch diff --git a/prdoc/pr_6509.prdoc b/prdoc/pr_6509.prdoc deleted file mode 100644 index 74215fe0084c..000000000000 --- a/prdoc/pr_6509.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Migrate pallet-democracy benchmark to v2 - -doc: - - audience: Runtime Dev - description: | - "Part of issue #6202." - -crates: -- name: pallet-democracy - bump: patch diff --git a/prdoc/pr_6521.prdoc b/prdoc/pr_6521.prdoc deleted file mode 100644 index 6f4acf8d028b..000000000000 --- a/prdoc/pr_6521.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Pure state sync refactoring (part-2) - -doc: -- audience: Node Dev - description: | - This is the last part of the pure refactoring of state sync, focusing on encapsulating `StateSyncMetadata` as a separate entity. 
- -crates: -- name: sc-network-sync - bump: none diff --git a/prdoc/pr_6522.prdoc b/prdoc/pr_6522.prdoc deleted file mode 100644 index bd59e9cb08dc..000000000000 --- a/prdoc/pr_6522.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -title: Removes constraint in BlockNumberProvider from treasury - -doc: -- audience: Runtime Dev - description: |- - https://github.com/paritytech/polkadot-sdk/pull/3970 updated the treasury pallet to support - relay chain block number provider. However, it added a constraint to the `BlockNumberProvider` - trait to have the same block number type as `frame_system`: - - ```rust - type BlockNumberProvider: BlockNumberProvider>; - ``` - - This PR removes that constraint and allows the treasury pallet to use any block number type. - -crates: -- name: pallet-treasury - bump: major \ No newline at end of file diff --git a/prdoc/pr_6526.prdoc b/prdoc/pr_6526.prdoc deleted file mode 100644 index 9ea1368ab10c..000000000000 --- a/prdoc/pr_6526.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: 'sp-runtime: Be a little bit more functional :D' -doc: -- audience: Runtime Dev - description: - Some internal refactorings in the `Digest` code. -crates: -- name: sp-runtime - bump: patch diff --git a/prdoc/pr_6528.prdoc b/prdoc/pr_6528.prdoc deleted file mode 100644 index 477ad76c947f..000000000000 --- a/prdoc/pr_6528.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -title: 'TransactionPool API uses async_trait' -doc: -- audience: Node Dev - description: |- - This PR refactors `TransactionPool` API to use `async_trait`, replacing the` Pin>` pattern. This should improve readability and maintainability. - - The change is not altering any functionality. 
-crates: -- name: sc-rpc-spec-v2 - bump: minor -- name: sc-service - bump: minor -- name: sc-transaction-pool-api - bump: major -- name: sc-transaction-pool - bump: major -- name: sc-rpc - bump: minor diff --git a/prdoc/pr_6533.prdoc b/prdoc/pr_6533.prdoc deleted file mode 100644 index eb72a97db0f8..000000000000 --- a/prdoc/pr_6533.prdoc +++ /dev/null @@ -1,20 +0,0 @@ -title: "Migrate executor into PolkaVM 0.18.0" -doc: - - audience: Runtime Dev - description: | - Bump `polkavm` to 0.18.0, and update `sc-polkavm-executor` to be - compatible with the API changes. In addition, bump also `polkavm-derive` - and `polkavm-linker` in order to make sure that the all parts of the - Polkadot SDK use the exact same ABI for `.polkavm` binaries. - - Purely relying on RV32E/RV64E ABI is not possible, as PolkaVM uses a - RISCV-V alike ISA, which is derived from RV32E/RV64E but it is still its - own microarchitecture, i.e. not fully binary compatible. - -crates: - - name: sc-executor-common - bump: major - - name: sc-executor-polkavm - bump: minor - - name: substrate-wasm-builder - bump: minor diff --git a/prdoc/pr_6534.prdoc b/prdoc/pr_6534.prdoc deleted file mode 100644 index 7a92fe3c857b..000000000000 --- a/prdoc/pr_6534.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Forward logging directives to Polkadot workers -doc: -- audience: Node Dev - description: |- - This pull request forward all the logging directives given to the node via `RUST_LOG` or `-l` to the workers, instead of only forwarding `RUST_LOG`. 
-crates: -- name: polkadot-node-core-pvf - bump: patch -- name: sc-tracing - bump: patch diff --git a/prdoc/pr_6540.prdoc b/prdoc/pr_6540.prdoc deleted file mode 100644 index 5e0305205521..000000000000 --- a/prdoc/pr_6540.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Only allow apply slash to be executed if the slash amount is atleast ED - -doc: - - audience: Runtime User - description: | - This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower - than the ED. With this change, such small slashes will still be applied but only when member funds are withdrawn. - -crates: -- name: pallet-nomination-pools-runtime-api - bump: patch -- name: pallet-nomination-pools - bump: major diff --git a/prdoc/pr_6544.prdoc b/prdoc/pr_6544.prdoc deleted file mode 100644 index f2bc9627697d..000000000000 --- a/prdoc/pr_6544.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Add and test events to conviction voting pallet - -doc: - - audience: Runtime Dev - description: | - Add event for the unlocking of an expired conviction vote's funds, and test recently added - voting events. - -crates: - - name: pallet-conviction-voting - bump: major diff --git a/prdoc/pr_6546.prdoc b/prdoc/pr_6546.prdoc deleted file mode 100644 index 353578a7f58f..000000000000 --- a/prdoc/pr_6546.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: Increase default trie cache size to 1GiB -doc: -- audience: Node Operator - description: "The default trie cache size before was set to `64MiB`, which is quite\ - \ low to achieve real speed ups. 
`1GiB` should be a reasonable number as the requirements\ - \ for validators/collators/full nodes are much higher when it comes to minimum\ - \ memory requirements. Also the cache will not use `1GiB` from the start and fills\ - \ over time. The setting can be changed by setting `--trie-cache-size BYTE_SIZE`.\ - The CLI option `--state-cache-size` is also removed, which was not having any effect anymore.\r\ - \n" -crates: -- name: sc-cli - bump: patch diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc deleted file mode 100644 index 61a64c724185..000000000000 --- a/prdoc/pr_6549.prdoc +++ /dev/null @@ -1,247 +0,0 @@ -doc: [] - -crates: - - name: polkadot-sdk - bump: none - - name: asset-test-utils - bump: none - - name: cumulus-pallet-parachain-system - bump: none - - name: cumulus-pallet-parachain-system-proc-macro - bump: none - - name: cumulus-primitives-core - bump: none - - name: polkadot-core-primitives - bump: none - - name: polkadot-parachain-primitives - bump: none - - name: polkadot-primitives - bump: none - - name: staging-xcm - bump: none - - name: xcm-procedural - bump: none - - name: cumulus-primitives-parachain-inherent - bump: none - - name: cumulus-primitives-proof-size-hostfunction - bump: none - - name: polkadot-runtime-common - bump: none - - name: polkadot-runtime-parachains - bump: none - - name: polkadot-runtime-metrics - bump: none - - name: staging-xcm-executor - bump: none - - name: slot-range-helper - bump: none - - name: staging-xcm-builder - bump: none - - name: pallet-xcm - bump: none - - name: cumulus-primitives-storage-weight-reclaim - bump: none - - name: cumulus-pallet-aura-ext - bump: none - - name: cumulus-primitives-aura - bump: none - - name: staging-parachain-info - bump: none - - name: cumulus-test-relay-sproof-builder - bump: none - - name: cumulus-client-cli - bump: none - - name: cumulus-client-collator - bump: none - - name: cumulus-client-consensus-common - bump: none - - name: cumulus-client-pov-recovery - bump: none 
- - name: cumulus-relay-chain-interface - bump: none - - name: polkadot-overseer - bump: none - - name: tracing-gum - bump: none - - name: tracing-gum-proc-macro - bump: none - - name: polkadot-node-metrics - bump: none - - name: polkadot-node-primitives - bump: none - - name: polkadot-erasure-coding - bump: none - - name: polkadot-node-subsystem - bump: none - - name: polkadot-node-subsystem-types - bump: none - - name: polkadot-node-network-protocol - bump: none - - name: polkadot-statement-table - bump: none - - name: polkadot-rpc - bump: none - - name: polkadot-service - bump: none - - name: cumulus-client-parachain-inherent - bump: none - - name: westend-runtime - bump: none - - name: pallet-xcm-benchmarks - bump: none - - name: westend-runtime-constants - bump: none - - name: polkadot-approval-distribution - bump: none - - name: polkadot-node-subsystem-util - bump: none - - name: polkadot-availability-bitfield-distribution - bump: none - - name: polkadot-availability-distribution - bump: none - - name: polkadot-availability-recovery - bump: none - - name: polkadot-node-core-approval-voting - bump: none - - name: polkadot-node-core-approval-voting-parallel - bump: none - - name: polkadot-node-core-av-store - bump: none - - name: polkadot-node-core-chain-api - bump: none - - name: polkadot-statement-distribution - bump: none - - name: polkadot-collator-protocol - bump: none - - name: polkadot-dispute-distribution - bump: none - - name: polkadot-gossip-support - bump: none - - name: polkadot-network-bridge - bump: none - - name: polkadot-node-collation-generation - bump: none - - name: polkadot-node-core-backing - bump: none - - name: polkadot-node-core-bitfield-signing - bump: none - - name: polkadot-node-core-candidate-validation - bump: none - - name: polkadot-node-core-pvf - bump: none - - name: polkadot-node-core-pvf-common - bump: none - - name: polkadot-node-core-pvf-execute-worker - bump: none - - name: polkadot-node-core-pvf-prepare-worker - bump: none 
- - name: staging-tracking-allocator - bump: none - - name: rococo-runtime - bump: none - - name: rococo-runtime-constants - bump: none - - name: polkadot-node-core-chain-selection - bump: none - - name: polkadot-node-core-dispute-coordinator - bump: none - - name: polkadot-node-core-parachains-inherent - bump: none - - name: polkadot-node-core-prospective-parachains - bump: none - - name: polkadot-node-core-provisioner - bump: none - - name: polkadot-node-core-pvf-checker - bump: none - - name: polkadot-node-core-runtime-api - bump: none - - name: cumulus-client-network - bump: none - - name: cumulus-relay-chain-inprocess-interface - bump: none - - name: polkadot-cli - bump: none - - name: cumulus-client-consensus-aura - bump: none - - name: cumulus-client-consensus-proposer - bump: none - - name: cumulus-client-consensus-relay-chain - bump: none - - name: cumulus-client-service - bump: none - - name: cumulus-relay-chain-minimal-node - bump: none - - name: cumulus-relay-chain-rpc-interface - bump: none - - name: parachains-common - bump: none - - name: cumulus-primitives-utility - bump: none - - name: cumulus-pallet-xcmp-queue - bump: none - - name: parachains-runtimes-test-utils - bump: none - - name: assets-common - bump: none - - name: bridge-hub-common - bump: none - - name: bridge-hub-test-utils - bump: none - - name: cumulus-pallet-solo-to-para - bump: none - - name: cumulus-pallet-xcm - bump: none - - name: cumulus-ping - bump: none - - name: cumulus-primitives-timestamp - bump: none - - name: emulated-integration-tests-common - bump: none - - name: xcm-emulator - bump: none - - name: pallet-collective-content - bump: none - - name: xcm-simulator - bump: none - - name: pallet-revive-fixtures - bump: none - - name: polkadot-omni-node-lib - bump: none - - name: snowbridge-runtime-test-common - bump: none - - name: testnet-parachains-constants - bump: none - - name: asset-hub-rococo-runtime - bump: none - - name: asset-hub-westend-runtime - bump: none - - 
name: bridge-hub-rococo-runtime - bump: none - - name: bridge-hub-westend-runtime - bump: none - - name: collectives-westend-runtime - bump: none - - name: coretime-rococo-runtime - bump: none - - name: coretime-westend-runtime - bump: none - - name: people-rococo-runtime - bump: none - - name: people-westend-runtime - bump: none - - name: contracts-rococo-runtime - bump: none - - name: glutton-westend-runtime - bump: none - - name: rococo-parachain-runtime - bump: none - - name: polkadot-omni-node - bump: none - - name: polkadot-parachain-bin - bump: none - - name: polkadot - bump: none - - name: polkadot-voter-bags - bump: none - - name: xcm-simulator-example - bump: none diff --git a/prdoc/pr_6553.prdoc b/prdoc/pr_6553.prdoc deleted file mode 100644 index 8692eba3a9f5..000000000000 --- a/prdoc/pr_6553.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: Ensure sync event is processed on unknown peer roles - -doc: - - audience: Node Dev - description: | - The GossipEngine::poll_next implementation polls both the notification_service and the sync_event_stream. - This PR ensures both events are processed gracefully. 
- -crates: - - name: sc-network-gossip - bump: patch - - name: sc-network-sync - bump: patch diff --git a/prdoc/pr_6561.prdoc b/prdoc/pr_6561.prdoc deleted file mode 100644 index 714521925a6b..000000000000 --- a/prdoc/pr_6561.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: 'slot-based-collator: Move spawning of the futures' -doc: -- audience: Node Dev - description: "Move spawning of the slot-based collator into the `run` function.\ - \ Also the tasks are being spawned as blocking task and not just as normal tasks.\r\ - \n" -crates: -- name: cumulus-client-consensus-aura - bump: major -- name: polkadot-omni-node-lib - bump: major diff --git a/prdoc/pr_6562.prdoc b/prdoc/pr_6562.prdoc deleted file mode 100644 index 250b656aefb5..000000000000 --- a/prdoc/pr_6562.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Hide nonce implementation details in metadata - -doc: - - audience: Runtime Dev - description: | - Use custom implementation of TypeInfo for TypeWithDefault to show inner value's type info. - This should bring back nonce to u64 in metadata. - -crates: -- name: sp-runtime - bump: minor \ No newline at end of file diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc deleted file mode 100644 index f9a75a16a6a7..000000000000 --- a/prdoc/pr_6565.prdoc +++ /dev/null @@ -1,35 +0,0 @@ -title: 'pallet_revive: Switch to 64bit RISC-V' -doc: -- audience: Runtime Dev - description: |- - This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. - - Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. 
- - ## Fixtures - - The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. - - ## Syscall interface - - ### Passing pointer - - Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. - - ### Functions with more than 6 arguments - - We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes lead to an out of bound access. - - This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. 
- - ## TODO - - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface -crates: -- name: pallet-revive - bump: major -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive-proc-macro - bump: major -- name: pallet-revive-uapi - bump: major diff --git a/prdoc/pr_6583.prdoc b/prdoc/pr_6583.prdoc deleted file mode 100644 index 0e67ed33e27c..000000000000 --- a/prdoc/pr_6583.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: Bump Westend AH -doc: -- audience: Runtime Dev - description: Bump Asset-Hub westend spec version -crates: -- name: asset-hub-westend-runtime - bump: minor diff --git a/prdoc/pr_6604.prdoc b/prdoc/pr_6604.prdoc deleted file mode 100644 index dc198287ff67..000000000000 --- a/prdoc/pr_6604.prdoc +++ /dev/null @@ -1,106 +0,0 @@ -title: 'dmp: Check that the para exist before delivering a message' -doc: -- audience: Runtime Dev - description: | - Ensure that a para exists before trying to deliver a message to it. - Besides that `ensure_successful_delivery` function is added to `SendXcm`. This function - should be used by benchmarks to ensure that the delivery of a Xcm will work in the benchmark. 
-crates: -- name: polkadot-runtime-parachains - bump: major -- name: polkadot-runtime-common - bump: major -- name: polkadot-parachain-primitives - bump: major -- name: rococo-runtime - bump: major -- name: westend-runtime - bump: major -- name: pallet-xcm-benchmarks - bump: major -- name: pallet-xcm - bump: major -- name: cumulus-pallet-parachain-system - bump: major -- name: staging-xcm - bump: major -- name: staging-xcm-builder - bump: major -- name: bridge-runtime-common - bump: major -- name: pallet-xcm-bridge-hub-router - bump: major -- name: pallet-xcm-bridge-hub - bump: major -- name: snowbridge-pallet-inbound-queue - bump: major -- name: snowbridge-pallet-system - bump: major -- name: snowbridge-core - bump: major -- name: snowbridge-router-primitives - bump: major -- name: snowbridge-runtime-common - bump: major -- name: snowbridge-runtime-test-common - bump: major -- name: cumulus-pallet-dmp-queue - bump: major -- name: cumulus-pallet-xcmp-queue - bump: major -- name: parachains-common - bump: major -- name: asset-hub-rococo-runtime - bump: major -- name: asset-hub-westend-runtime - bump: major -- name: assets-common - bump: major -- name: bridge-hub-rococo-runtime - bump: major -- name: bridge-hub-westend-runtime - bump: major -- name: bridge-hub-common - bump: major -- name: collectives-westend-runtime - bump: major -- name: contracts-rococo-runtime - bump: major -- name: coretime-rococo-runtime - bump: major -- name: coretime-westend-runtime - bump: major -- name: glutton-westend-runtime - bump: major -- name: people-rococo-runtime - bump: major -- name: people-westend-runtime - bump: major -- name: penpal-runtime - bump: major -- name: rococo-parachain-runtime - bump: major -- name: polkadot-parachain-bin - bump: major -- name: cumulus-primitives-core - bump: major -- name: cumulus-primitives-utility - bump: major -- name: polkadot-service - bump: major -- name: staging-xcm-executor - bump: major -- name: xcm-runtime-apis - bump: major -- name: 
xcm-simulator-example - bump: major -- name: pallet-contracts - bump: major -- name: pallet-contracts-mock-network - bump: major -- name: pallet-revive - bump: major -- name: pallet-revive-mock-network - bump: major -- name: polkadot-sdk - bump: major diff --git a/prdoc/pr_6605.prdoc b/prdoc/pr_6605.prdoc deleted file mode 100644 index 2adb1d8aee35..000000000000 --- a/prdoc/pr_6605.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Notify telemetry only every second about the tx pool status -doc: -- audience: Node Operator - description: |- - Before this was done for every imported transaction. When a lot of transactions got imported, the import notification channel was filled. The underlying problem was that the `status` call is read locking the `validated_pool` which will be write locked by the internal submitting logic. Thus, the submitting and status reading was interferring which each other. -crates: -- name: cumulus-client-service - bump: patch -- name: sc-service - bump: patch diff --git a/prdoc/pr_6608.prdoc b/prdoc/pr_6608.prdoc deleted file mode 100644 index b9cd7008de47..000000000000 --- a/prdoc/pr_6608.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '[pallet-revive] eth-prc fix geth diff' -doc: -- audience: Runtime Dev - description: |- - * Add a bunch of differential tests to ensure that responses from eth-rpc matches the one from `geth` - * EVM RPC server will not fail gas_estimation if no gas is specified, I updated pallet-revive to add an extra `skip_transfer` boolean check to replicate this behavior in our pallet - * `eth_transact` and `bare_eth_transact` api have been updated to use `GenericTransaction` directly as this is what is used by `eth_estimateGas` and `eth_call` -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor -- name: asset-hub-westend-runtime - bump: minor diff --git a/prdoc/pr_6624.prdoc b/prdoc/pr_6624.prdoc deleted file mode 100644 index 4db55a46e8df..000000000000 --- a/prdoc/pr_6624.prdoc +++ 
/dev/null @@ -1,11 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Use `cmd_lib` instead of `std::process::Command` when using `#[docify::export]` - -doc: - - audience: Runtime Dev - description: | - Simplified the display of commands and ensured they are tested for chain spec builder's `polkadot-sdk` reference docs. - -crates: [] \ No newline at end of file diff --git a/prdoc/pr_6628.prdoc b/prdoc/pr_6628.prdoc deleted file mode 100644 index 7ea0c4968385..000000000000 --- a/prdoc/pr_6628.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: "Remove ReportCollator message" - -doc: - - audience: Node Dev - description: | - Remove unused message ReportCollator and test related to this message on the collator protocol validator side. - -crates: - - name: polkadot-node-subsystem-types - bump: patch - - name: polkadot-collator-protocol - bump: major \ No newline at end of file diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc deleted file mode 100644 index 1db5fd54d971..000000000000 --- a/prdoc/pr_6636.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: Optimize initialization of networking protocol benchmarks -doc: -- audience: Node Dev - description: |- - These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. - -crates: -- name: sc-network - validate: false diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc deleted file mode 100644 index b5aaf8a3b184..000000000000 --- a/prdoc/pr_6665.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -title: Fix runtime api impl detection by construct runtime -doc: -- audience: Runtime Dev - description: |- - Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. 
However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. - - - Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 -crates: -- name: frame-support-procedural - bump: patch -- name: sp-api-proc-macro - bump: patch -- name: sp-metadata-ir - bump: patch diff --git a/prdoc/pr_6673.prdoc b/prdoc/pr_6673.prdoc deleted file mode 100644 index d2ca3c61ff39..000000000000 --- a/prdoc/pr_6673.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: 'chain-spec-guide-runtime: path to wasm blob fixed' -doc: -- audience: Runtime Dev - description: In `chain-spec-guide-runtime` crate's tests, there was assumption that - release version of wasm blob exists. This PR uses `chain_spec_guide_runtime::runtime::WASM_BINARY_PATH` - const to use correct path to runtime blob. -crates: [] diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc deleted file mode 100644 index 93a967d4a66c..000000000000 --- a/prdoc/pr_6681.prdoc +++ /dev/null @@ -1,406 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: update scale-info to 2.11.6 - -doc: - - audience: Runtime Dev - description: | - Updates scale-info to 2.11.1 from 2.11.5. 
- Updated version of scale-info annotates generated code with `allow(deprecated)` - -crates: - - name: bridge-runtime-common - bump: none - - name: bp-header-chain - bump: none - - name: bp-runtime - bump: none - - name: frame-support - bump: none - - name: sp-core - bump: none - - name: sp-trie - bump: none - - name: sp-runtime - bump: none - - name: sp-application-crypto - bump: none - - name: sp-arithmetic - bump: none - - name: sp-weights - bump: none - - name: sp-api - bump: none - - name: sp-metadata-ir - bump: none - - name: sp-version - bump: none - - name: sp-inherents - bump: none - - name: frame-executive - bump: none - - name: frame-system - bump: none - - name: pallet-balances - bump: none - - name: frame-benchmarking - bump: none - - name: pallet-migrations - bump: none - - name: cumulus-pallet-parachain-system - bump: none - - name: cumulus-primitives-core - bump: none - - name: polkadot-core-primitives - bump: none - - name: polkadot-parachain-primitives - bump: none - - name: polkadot-primitives - bump: none - - name: sp-authority-discovery - bump: none - - name: sp-consensus-slots - bump: none - - name: sp-staking - bump: none - - name: staging-xcm - bump: none - - name: cumulus-primitives-parachain-inherent - bump: none - - name: pallet-message-queue - bump: none - - name: polkadot-runtime-common - bump: none - - name: frame-election-provider-support - bump: none - - name: sp-npos-elections - bump: none - - name: sp-consensus-grandpa - bump: none - - name: polkadot-primitives - bump: none - - name: sp-authority-discovery - bump: none - - name: sp-consensus-grandpa - bump: none - - name: sp-genesis-builder - bump: none - - name: sp-consensus-babe - bump: none - - name: sp-mixnet - bump: none - - name: sc-rpc-api - bump: none - - name: sp-session - bump: none - - name: sp-statement-store - bump: none - - name: sp-transaction-storage-proof - bump: none - - name: pallet-asset-rate - bump: none - - name: pallet-authorship - bump: none - - name: 
pallet-babe - bump: none - - name: pallet-session - bump: none - - name: pallet-timestamp - bump: none - - name: pallet-offences - bump: none - - name: pallet-staking - bump: none - - name: pallet-bags-list - bump: none - - name: pallet-broker - bump: none - - name: pallet-election-provider-multi-phase - bump: none - - name: pallet-fast-unstake - bump: none - - name: pallet-identity - bump: none - - name: pallet-transaction-payment - bump: none - - name: pallet-treasury - bump: none - - name: pallet-utility - bump: none - - name: pallet-collective - bump: none - - name: pallet-root-testing - bump: none - - name: pallet-vesting - bump: none - - name: polkadot-runtime-parachains - bump: none - - name: pallet-authority-discovery - bump: none - - name: pallet-mmr - bump: none - - name: sp-mmr-primitives - bump: none - - name: staging-xcm-executor - bump: none - - name: staging-xcm-builder - bump: none - - name: pallet-asset-conversion - bump: none - - name: pallet-assets - bump: none - - name: pallet-salary - bump: none - - name: pallet-ranked-collective - bump: none - - name: pallet-xcm - bump: none - - name: xcm-runtime-apis - bump: none - - name: pallet-grandpa - bump: none - - name: pallet-indices - bump: none - - name: pallet-sudo - bump: none - - name: sp-consensus-beefy - bump: none - - name: cumulus-primitives-storage-weight-reclaim - bump: none - - name: cumulus-pallet-aura-ext - bump: none - - name: pallet-aura - bump: none - - name: sp-consensus-aura - bump: none - - name: pallet-collator-selection - bump: none - - name: pallet-glutton - bump: none - - name: staging-parachain-info - bump: none - - name: westend-runtime - bump: none - - name: frame-metadata-hash-extension - bump: none - - name: frame-system-benchmarking - bump: none - - name: pallet-beefy - bump: none - - name: pallet-beefy-mmr - bump: none - - name: pallet-conviction-voting - bump: none - - name: pallet-scheduler - bump: none - - name: pallet-preimage - bump: none - - name: 
pallet-delegated-staking - bump: none - - name: pallet-nomination-pools - bump: none - - name: pallet-democracy - bump: none - - name: pallet-elections-phragmen - bump: none - - name: pallet-membership - bump: none - - name: pallet-multisig - bump: none - - name: polkadot-sdk-frame - bump: none - - name: pallet-dev-mode - bump: none - - name: pallet-verify-signature - bump: none - - name: pallet-nomination-pools-benchmarking - bump: none - - name: pallet-offences-benchmarking - bump: none - - name: pallet-im-online - bump: none - - name: pallet-parameters - bump: none - - name: pallet-proxy - bump: none - - name: pallet-recovery - bump: none - - name: pallet-referenda - bump: none - - name: pallet-society - bump: none - - name: pallet-state-trie-migration - bump: none - - name: pallet-whitelist - bump: none - - name: pallet-xcm-benchmarks - bump: none - - name: rococo-runtime - bump: none - - name: pallet-bounties - bump: none - - name: pallet-child-bounties - bump: none - - name: pallet-nis - bump: none - - name: pallet-tips - bump: none - - name: parachains-common - bump: none - - name: pallet-asset-tx-payment - bump: none - - name: cumulus-pallet-xcmp-queue - bump: none - - name: bp-xcm-bridge-hub-router - bump: none - - name: pallet-xcm-bridge-hub-router - bump: none - - name: assets-common - bump: none - - name: bp-messages - bump: none - - name: bp-parachains - bump: none - - name: bp-polkadot-core - bump: none - - name: bp-relayers - bump: none - - name: bp-xcm-bridge-hub - bump: none - - name: bridge-hub-common - bump: none - - name: snowbridge-core - bump: none - - name: snowbridge-beacon-primitives - bump: none - - name: snowbridge-ethereum - bump: none - - name: pallet-bridge-grandpa - bump: none - - name: pallet-bridge-messages - bump: none - - name: pallet-bridge-parachains - bump: none - - name: pallet-bridge-relayers - bump: none - - name: pallet-xcm-bridge-hub - bump: none - - name: cumulus-pallet-dmp-queue - bump: none - - name: 
cumulus-pallet-solo-to-para - bump: none - - name: cumulus-pallet-xcm - bump: none - - name: cumulus-ping - bump: none - - name: frame-benchmarking-pallet-pov - bump: none - - name: pallet-alliance - bump: none - - name: pallet-asset-conversion-ops - bump: none - - name: pallet-asset-conversion-tx-payment - bump: none - - name: pallet-assets-freezer - bump: none - - name: pallet-atomic-swap - bump: none - - name: pallet-collective-content - bump: none - - name: pallet-contracts - bump: none - - name: pallet-contracts-uapi - bump: none - - name: pallet-insecure-randomness-collective-flip - bump: none - - name: pallet-contracts-mock-network - bump: none - - name: xcm-simulator - bump: none - - name: pallet-core-fellowship - bump: none - - name: pallet-lottery - bump: none - - name: pallet-mixnet - bump: none - - name: pallet-nft-fractionalization - bump: none - - name: pallet-nfts - bump: none - - name: pallet-node-authorization - bump: none - - name: pallet-paged-list - bump: none - - name: pallet-remark - bump: none - - name: pallet-revive - bump: none - - name: pallet-revive-uapi - bump: none - - name: pallet-revive-eth-rpc - bump: none - - name: pallet-skip-feeless-payment - bump: none - - name: pallet-revive-mock-network - bump: none - - name: pallet-root-offences - bump: none - - name: pallet-safe-mode - bump: none - - name: pallet-scored-pool - bump: none - - name: pallet-statement - bump: none - - name: pallet-transaction-storage - bump: none - - name: pallet-tx-pause - bump: none - - name: pallet-uniques - bump: none - - name: snowbridge-outbound-queue-merkle-tree - bump: none - - name: snowbridge-pallet-ethereum-client - bump: none - - name: snowbridge-pallet-inbound-queue - bump: none - - name: snowbridge-router-primitives - bump: none - - name: snowbridge-pallet-outbound-queue - bump: none - - name: snowbridge-pallet-system - bump: none - - name: bp-asset-hub-rococo - bump: none - - name: bp-asset-hub-westend - bump: none - - name: bp-polkadot-bulletin - 
bump: none - - name: asset-hub-rococo-runtime - bump: none - - name: asset-hub-westend-runtime - bump: none - - name: bridge-hub-rococo-runtime - bump: none - - name: bridge-hub-westend-runtime - bump: none - - name: collectives-westend-runtime - bump: none - - name: coretime-rococo-runtime - bump: none - - name: coretime-westend-runtime - bump: none - - name: people-rococo-runtime - bump: none - - name: people-westend-runtime - bump: none - - name: penpal-runtime - bump: none - - name: contracts-rococo-runtime - bump: none - - name: glutton-westend-runtime - bump: none - - name: rococo-parachain-runtime - bump: none - - name: xcm-simulator-example - bump: none \ No newline at end of file diff --git a/prdoc/pr_6695.prdoc b/prdoc/pr_6695.prdoc deleted file mode 100644 index 7a950e8546cd..000000000000 --- a/prdoc/pr_6695.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: '[pallet-revive] bugfix decoding 64bit args in the decoder' -doc: -- audience: Runtime Dev - description: The argument index of the next argument is dictated by the size of - the current one. -crates: -- name: pallet-revive-proc-macro - bump: patch diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc deleted file mode 100644 index 2dd0962a3eea..000000000000 --- a/prdoc/pr_6703.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved' -doc: -- audience: Node Dev - description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598. -crates: -- name: sc-network - bump: patch diff --git a/prdoc/pr_6711.prdoc b/prdoc/pr_6711.prdoc deleted file mode 100644 index ec09035e1356..000000000000 --- a/prdoc/pr_6711.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: Expose DHT content providers API from `sc-network` -doc: -- audience: Node Dev - description: |- - Expose the Kademlia content providers API for the use by `sc-network` client code: - 1. 
Extend the `NetworkDHTProvider` trait with functions to start/stop providing content and query the DHT for the list of content providers for a given key. - 2. Extend the `DhtEvent` enum with events reporting the found providers or query failures. - 3. Implement the above for libp2p & litep2p network backends. -crates: -- name: sc-network - bump: major -- name: sc-authority-discovery - bump: major diff --git a/prdoc/pr_6728.prdoc b/prdoc/pr_6728.prdoc deleted file mode 100644 index 68f61190d947..000000000000 --- a/prdoc/pr_6728.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: '[pallet-revive] eth-rpc add missing tests' -doc: -- audience: Runtime Dev - description: |- - Add tests for #6608 - - fix https://github.com/paritytech/contract-issues/issues/12 -crates: -- name: pallet-revive-eth-rpc - bump: minor -- name: pallet-revive - bump: minor diff --git a/prdoc/pr_6741.prdoc b/prdoc/pr_6741.prdoc deleted file mode 100644 index d4b795038bcd..000000000000 --- a/prdoc/pr_6741.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: 'pallet-revive: Adjust error handling of sub calls' -doc: -- audience: Runtime Dev - description: |- - We were trapping the host context in case a sub call was exhausting the storage deposit limit set for this sub call. This prevents the caller from handling this error. In this PR we added a new error code that is returned when either gas or storage deposit limit is exhausted by the sub call. - - We also remove the longer used `NotCallable` error. No longer used because this is no longer an error: It will just be a balance transfer. - - We also make `set_code_hash` infallible to be consistent with other host functions which just trap on any error condition. 
-crates: -- name: pallet-revive - bump: major -- name: pallet-revive-uapi - bump: major -- name: pallet-revive-fixtures - bump: major diff --git a/prdoc/pr_6743.prdoc b/prdoc/pr_6743.prdoc deleted file mode 100644 index 4c35ff46ca67..000000000000 --- a/prdoc/pr_6743.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: 'umbrella: Remove `pallet-revive-fixtures`' -doc: -- audience: Runtime Dev - description: |- - No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. -crates: -- name: pallet-revive-fixtures - bump: major -- name: polkadot-sdk - bump: major diff --git a/prdoc/pr_6759.prdoc b/prdoc/pr_6759.prdoc deleted file mode 100644 index 3dff12d740d4..000000000000 --- a/prdoc/pr_6759.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: 'pallet-revive: Statically verify imports on code deployment' -doc: -- audience: Runtime Dev - description: |- - Previously, we failed at runtime if an unknown or unstable host function was called. This requires us to keep track of when a host function was added and when a code was deployed. We used the `api_version` to track at which API version each code was deployed. This made sure that when a new host function was added that old code won't have access to it. This is necessary as otherwise the behavior of a contract that made calls to this previously non existent host function would change from "trap" to "do something". - - In this PR we remove the API version. Instead, we statically verify on upload that no non-existent host function is ever used in the code. This will allow us to add new host function later without needing to keep track when they were added. - - This simplifies the code and also gives an immediate feedback if unknown host functions are used. 
-crates: -- name: pallet-revive-proc-macro - bump: major -- name: pallet-revive - bump: major -- name: pallet-revive-fixtures - bump: major diff --git a/prdoc/pr_6768.prdoc b/prdoc/pr_6768.prdoc deleted file mode 100644 index 3e194078df26..000000000000 --- a/prdoc/pr_6768.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '`basic-authorship`: debug level is now less spammy' -doc: -- audience: Node Dev - description: |- - The `debug` level in `sc-basic-authorship` is now less spammy. Previously it was outputing logs per individual transactions. It made quite hard to follow the logs (and also generates unneeded traffic in grafana). - - Now debug level only show some internal details, without spamming output with per-transaction logs. They were moved to `trace` level. - - I also added the `EndProposingReason` to the summary INFO message. This allows us to know what was the block limit (which is very useful for debugging). -crates: -- name: sc-basic-authorship - bump: major -- name: sc-proposer-metrics - bump: major diff --git a/prdoc/pr_6792.prdoc b/prdoc/pr_6792.prdoc deleted file mode 100644 index 80982a34b3e8..000000000000 --- a/prdoc/pr_6792.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: Add fallback_max_weight to snowbridge Transact -doc: -- audience: Runtime Dev - description: |- - We removed the `require_weight_at_most` field and later changed it to `fallback_max_weight`. - This was to have a fallback when sending a message to v4 chains, which happens in the small time window when chains are upgrading. - We originally put no fallback for a message in snowbridge's inbound queue but we should have one. - This PR adds it. 
-crates: -- name: snowbridge-router-primitives - bump: patch diff --git a/prdoc/pr_6796.prdoc b/prdoc/pr_6796.prdoc deleted file mode 100644 index aeb305847bf8..000000000000 --- a/prdoc/pr_6796.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: 'pallet-revive: Remove unused dependencies' -doc: -- audience: Runtime Dev - description: The dependency on `pallet_balances` doesn't seem to be necessary. At - least everything compiles for me without it. Removed this dependency and a few - others that seem to be left overs. -crates: -- name: pallet-revive - bump: major diff --git a/prdoc/pr_6832.prdoc b/prdoc/pr_6832.prdoc deleted file mode 100644 index bd0abbfba853..000000000000 --- a/prdoc/pr_6832.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: "Remove collation-generation subsystem from validator nodes" - -doc: - - audience: Node Dev - description: | - Collation-generation is only needed for Collators, and therefore not needed for validators - -crates: - - name: polkadot-service - bump: patch \ No newline at end of file diff --git a/prdoc/pr_6835.prdoc b/prdoc/pr_6835.prdoc deleted file mode 100644 index 73d1a81e761c..000000000000 --- a/prdoc/pr_6835.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: '[pallet-revive] implement the call data load API' -doc: -- audience: Runtime Dev - description: |- - This PR implements the call data load API akin to [how it works on ethereum](https://www.evm.codes/?fork=cancun#35). 
-crates: -- name: pallet-revive-fixtures - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-uapi - bump: minor diff --git a/prdoc/pr_6844.prdoc b/prdoc/pr_6844.prdoc deleted file mode 100644 index 32901bf04df9..000000000000 --- a/prdoc/pr_6844.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: 'pallet-revive: disable host functions unused in solidity PolkaVM compiler' -doc: -- audience: Runtime Dev - description: Disables host functions in contracts that are not enabled - in solidity PolkaVM compiler to reduce surface of possible attack vectors. -crates: -- name: pallet-revive - bump: major diff --git a/prdoc/pr_6857.prdoc b/prdoc/pr_6857.prdoc deleted file mode 100644 index 3930f5910487..000000000000 --- a/prdoc/pr_6857.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '[pallet-revive] implement the call data size API' -doc: -- audience: Runtime Dev - description: |- - This PR adds an API method to query the contract call data input size. - - Part of #6770 -crates: -- name: pallet-revive-fixtures - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-uapi - bump: minor diff --git a/prdoc/pr_6865.prdoc b/prdoc/pr_6865.prdoc deleted file mode 100644 index c0581f2af24f..000000000000 --- a/prdoc/pr_6865.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: Rename PanicInfo to PanicHookInfo -doc: -- audience: Node Dev - description: Starting with Rust 1.82 `PanicInfo` is deprecated and will throw warnings - when used. The new type is available since Rust 1.81 and should be available on - our CI. -crates: -- name: sp-panic-handler - bump: patch diff --git a/prdoc/pr_6866.prdoc b/prdoc/pr_6866.prdoc deleted file mode 100644 index fac40dc103d7..000000000000 --- a/prdoc/pr_6866.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: Refactor `pallet-revive-uapi` pallet -doc: -- audience: Runtime Dev - description: Puts unstable host functions in `uapi` under - `unstable-api` feature while moving those functions after - stable functions. 
-crates: -- name: pallet-revive - bump: patch -- name: pallet-revive-fixtures - bump: patch -- name: pallet-revive-uapi - bump: major diff --git a/prdoc/pr_6880.prdoc b/prdoc/pr_6880.prdoc deleted file mode 100644 index 9d59382f0e0b..000000000000 --- a/prdoc/pr_6880.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '[pallet-revive] implement the call data copy API' -doc: -- audience: Runtime Dev - description: |- - This PR implements the call data copy API by adjusting the input method. - - Closes #6770 -crates: -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive - bump: major -- name: pallet-revive-uapi - bump: major \ No newline at end of file diff --git a/prdoc/pr_6889.prdoc b/prdoc/pr_6889.prdoc deleted file mode 100644 index 01edd49b685a..000000000000 --- a/prdoc/pr_6889.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Remove polkadot-omni-node-lib unused dependency - -doc: - - audience: Node Dev - description: - Removed an unused dependency for `polkadot-omni-node-lib`. - -crates: - - name: polkadot-omni-node-lib - bump: patch diff --git a/prdoc/pr_6896.prdoc b/prdoc/pr_6896.prdoc deleted file mode 100644 index a56e4303d9af..000000000000 --- a/prdoc/pr_6896.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: 'pallet-revive: Fix docs.rs' -doc: -- audience: Runtime Dev - description: |- - - Fixed failing docs.rs build for `pallet-revive-uapi` by fixing a writing attribute in the manifest (we were using `default-target` instead of `targets`) - - Removed the macros defining host functions because the cfg attributes introduced in #6866 won't work on them - - Added an docs.rs specific attribute so that the `unstable-hostfn` feature tag will show up on the functions that are guarded behind it. 
-crates: -- name: pallet-contracts-uapi - bump: major -- name: pallet-revive-uapi - bump: major -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive-proc-macro - bump: major diff --git a/prdoc/pr_6908.prdoc b/prdoc/pr_6908.prdoc deleted file mode 100644 index 0be9e613f88a..000000000000 --- a/prdoc/pr_6908.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: '[pallet-revive] implement the ref_time_left API' -doc: -- audience: Runtime Dev - description: This PR implements the ref_time_left API method. Solidity knows only - a single "gas" dimension; Solidity contracts will use this to query the gas left. -crates: -- name: pallet-revive-fixtures - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-uapi - bump: minor diff --git a/prdoc/pr_6917.prdoc b/prdoc/pr_6917.prdoc deleted file mode 100644 index dd7f59b95126..000000000000 --- a/prdoc/pr_6917.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: Remove unused dependencies from pallet_revive -doc: -- audience: Runtime Dev - description: Removing apparently unused dependencies from `pallet_revive` and related - crates. -crates: -- name: pallet-revive - bump: major -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive-mock-network - bump: major -- name: pallet-revive-eth-rpc - bump: major diff --git a/prdoc/pr_6920.prdoc b/prdoc/pr_6920.prdoc deleted file mode 100644 index d80a77e0a71f..000000000000 --- a/prdoc/pr_6920.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '[pallet-revive] change some getter APIs to return value in register' -doc: -- audience: Runtime Dev - description: Call data, return data and code sizes can never exceed `u32::MAX`; - they are also not generic. Hence we know that they are guaranteed to always fit - into a 64bit register and `revive` can just zero extend them into a 256bit integer - value. Which is slightly more efficient than passing them on the stack. 
-crates: -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive - bump: major -- name: pallet-revive-uapi - bump: major diff --git a/prdoc/pr_6923.prdoc b/prdoc/pr_6923.prdoc deleted file mode 100644 index 5d88d7158e7f..000000000000 --- a/prdoc/pr_6923.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: 'omni-node: Tolerate failing metadata check' -doc: -- audience: Node Operator - description: |- - #6450 introduced metadata checks. Supported are metadata v14 and higher. - - However, of course old chain-specs have a genesis code blob that might be on older version. This needs to be tolerated. We should just skip the checks in that case. - - Fixes #6921 -crates: -- name: polkadot-omni-node-lib - bump: patch diff --git a/prdoc/pr_6926.prdoc b/prdoc/pr_6926.prdoc deleted file mode 100644 index 788d6c110873..000000000000 --- a/prdoc/pr_6926.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: '[pallet-revive] implement the gas limit API' -doc: -- audience: Runtime Dev - description: This PR implements the gas limit API, returning the maximum ref_time - per block. Solidity contracts only know a single weight dimension and can use - this method to get the block ref_time limit. 
-crates: -- name: pallet-revive-fixtures - bump: major -- name: pallet-revive - bump: major -- name: pallet-revive-uapi - bump: major diff --git a/prdoc/pr_6928.prdoc b/prdoc/pr_6928.prdoc deleted file mode 100644 index 4b9023ab03a6..000000000000 --- a/prdoc/pr_6928.prdoc +++ /dev/null @@ -1,34 +0,0 @@ -title: '[Backport] Version bumps and `prdocs` reordering form 2412' -doc: -- audience: Runtime Dev - description: This PR includes backport of the regular version bumps and `prdocs` - reordering from the `stable2412` branch back ro master -crates: -- name: polkadot-node-primitives - bump: none -- name: asset-hub-rococo-runtime - bump: none -- name: bridge-hub-rococo-runtime - bump: none -- name: bridge-hub-westend-runtime - bump: none -- name: collectives-westend-runtime - bump: none -- name: contracts-rococo-runtime - bump: none -- name: coretime-rococo-runtime - bump: none -- name: coretime-westend-runtime - bump: none -- name: glutton-westend-runtime - bump: none -- name: people-rococo-runtime - bump: none -- name: people-westend-runtime - bump: none -- name: rococo-runtime - bump: none -- name: westend-runtime - bump: none -- name: asset-hub-westend-runtime - bump: none diff --git a/prdoc/pr_6937.prdoc b/prdoc/pr_6937.prdoc deleted file mode 100644 index 5c6806df0b5c..000000000000 --- a/prdoc/pr_6937.prdoc +++ /dev/null @@ -1,12 +0,0 @@ -title: '[pallet-revive] bump polkavm to 0.18' -doc: -- audience: Runtime Dev - description: Update to the latest polkavm version, containing a linker fix I need - for revive. -crates: -- name: pallet-revive - bump: patch -- name: pallet-revive-fixtures - bump: patch -- name: pallet-revive-uapi - bump: patch diff --git a/prdoc/pr_6954.prdoc b/prdoc/pr_6954.prdoc deleted file mode 100644 index 8e8faf5fffd2..000000000000 --- a/prdoc/pr_6954.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -title: '[pallet-revive] implement the gas price API' -doc: -- audience: Runtime Dev - description: This PR implements the EVM gas price syscall API method. 
Currently - this is a compile time constant in revive, but in the EVM it is an opcode. Thus - we should provide an opcode for this in the pallet. -crates: -- name: pallet-revive-fixtures - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-uapi - bump: minor diff --git a/prdoc/pr_6963.prdoc b/prdoc/pr_6963.prdoc deleted file mode 100644 index 7657349277b3..000000000000 --- a/prdoc/pr_6963.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: 'grandpa: Ensure `WarpProof` stays in its limits' -doc: -- audience: Node Dev - description: |- - There was the chance that a `WarpProof` was bigger than the maximum warp sync proof size. This could have happened when inserting the last justification, which then may pushed the total proof size above the maximum. The solution is simply to ensure that the last justfication also fits into the limits. - - Close: https://github.com/paritytech/polkadot-sdk/issues/6957 -crates: -- name: sc-consensus-grandpa - bump: patch diff --git a/prdoc/pr_6964.prdoc b/prdoc/pr_6964.prdoc deleted file mode 100644 index 3a88fa72e963..000000000000 --- a/prdoc/pr_6964.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -title: '[pallet-revive] implement the base fee API' -doc: -- audience: Runtime Dev - description: This PR implements the base fee syscall API method. Currently this - is implemented as a compile time constant in the revive compiler, returning 0. - However, since this is an opocde, if we ever need to implement it for compatibility - reasons with [EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md), - it would break already deployed contracts. Thus we provide a syscall method instead. 
-crates: -- name: pallet-revive-fixtures - bump: minor -- name: pallet-revive - bump: minor -- name: pallet-revive-uapi - bump: minor diff --git a/prdoc/pr_6979.prdoc b/prdoc/pr_6979.prdoc deleted file mode 100644 index fae7feeec2df..000000000000 --- a/prdoc/pr_6979.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: Update prometheus binding failure logging format -doc: -- audience: Node Dev - description: |- - Using `{:#?}` for the error details is a bit annoying, this change makes a more consistent formatting style for error messages. -crates: -- name: substrate-prometheus-endpoint - bump: patch diff --git a/prdoc/pr_6981.prdoc b/prdoc/pr_6981.prdoc deleted file mode 100644 index 8ed70e51ef45..000000000000 --- a/prdoc/pr_6981.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: '[pallet-revive] fix file case' -doc: -- audience: Runtime Dev - description: "fix https://github.com/paritytech/polkadot-sdk/issues/6970\r\n" -crates: -- name: pallet-revive-eth-rpc - bump: minor diff --git a/prdoc/pr_6986.prdoc b/prdoc/pr_6986.prdoc deleted file mode 100644 index 8deb6b04bd1c..000000000000 --- a/prdoc/pr_6986.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: '[pallet-mixnet] Migrate to using frame umbrella crate' - -doc: - - audience: Runtime Dev - description: This PR migrates the pallet-mixnet to use the frame umbrella crate. This - is part of the ongoing effort to migrate all pallets to use the frame umbrella crate. - The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504). 
- -crates: - - name: pallet-mixnet - bump: minor - - name: polkadot-sdk-frame - bump: minor - - name: polkadot-sdk - bump: none \ No newline at end of file diff --git a/prdoc/pr_6989.prdoc b/prdoc/pr_6989.prdoc deleted file mode 100644 index 86c56698d41e..000000000000 --- a/prdoc/pr_6989.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: 'paras-registrar: Improve error reporting' -doc: -- audience: Runtime User - description: |- - This pr improves the error reporting by paras registrar when an owner wants to access a locked parachain. - - Closes: https://github.com/paritytech/polkadot-sdk/issues/6745 -crates: -- name: polkadot-runtime-common - bump: patch diff --git a/prdoc/pr_7005.prdoc b/prdoc/pr_7005.prdoc deleted file mode 100644 index a61f7c5b9b71..000000000000 --- a/prdoc/pr_7005.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: Log peerset set ID -> protocol name mapping -doc: -- audience: Node Dev - description: To simplify debugging of peerset related issues like https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343. -crates: -- name: sc-network - bump: patch diff --git a/prdoc/pr_7011.prdoc b/prdoc/pr_7011.prdoc deleted file mode 100644 index 55fe0c73ca09..000000000000 --- a/prdoc/pr_7011.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: 'sync: Send already connected peers to new subscribers' -doc: -- audience: Node Dev - description: |- - Introduce `SyncEvent::InitialPeers` message sent to new subscribers to allow them correctly tracking sync peers. This resolves a race condition described in https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343. - - Fixes https://github.com/paritytech/polkadot-sdk/issues/6573. 
-crates: -- name: sc-network-gossip - bump: major -- name: sc-network-statement - bump: patch -- name: sc-network-sync - bump: major -- name: sc-network-transactions - bump: patch diff --git a/prdoc/pr_7013.prdoc b/prdoc/pr_7013.prdoc deleted file mode 100644 index 138fa7f23102..000000000000 --- a/prdoc/pr_7013.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: 'pallet-bounties: Fix benchmarks for 0 ED' -doc: -- audience: Runtime Dev - description: 'Closes: https://github.com/paritytech/polkadot-sdk/issues/7009' -crates: -- name: pallet-bounties - bump: patch diff --git a/prdoc/pr_7020.prdoc b/prdoc/pr_7020.prdoc deleted file mode 100644 index 5bbdb44c45a0..000000000000 --- a/prdoc/pr_7020.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -title: Remove warning log from frame-omni-bencher CLI -doc: -- audience: Node Operator - description: |- - # Description - - This PR removes the outdated warning message from the `frame-omni-bencher` CLI that states the tool is "not yet battle tested". Fixes #7019 - - ## Integration - - No integration steps are required. - - ## Review Notes - - The functionality of the tool remains unchanged. Removes the warning message from the CLI output. -crates: -- name: frame-omni-bencher - bump: patch diff --git a/prdoc/pr_7021.prdoc b/prdoc/pr_7021.prdoc deleted file mode 100644 index 5443579bbd92..000000000000 --- a/prdoc/pr_7021.prdoc +++ /dev/null @@ -1,8 +0,0 @@ -title: Improve remote externalities logging -doc: -- audience: Node Dev - description: |- - Automatically detect if current env is tty. If not disable the spinner logging. 
-crates: -- name: frame-remote-externalities - bump: patch diff --git a/prdoc/pr_7028.prdoc b/prdoc/pr_7028.prdoc deleted file mode 100644 index ead918fc2e00..000000000000 --- a/prdoc/pr_7028.prdoc +++ /dev/null @@ -1,25 +0,0 @@ -title: 'Fix implication order in implementation of `TransactionExtension` for tuple' -doc: -- audience: - - Runtime Dev - - Runtime User - description: |- - Before this PR, the implications were different in the pipeline `(A, B, C)` and `((A, B), C)`. - This PR fixes this behavior and make nested tuple transparant, the implication order of tuple of - tuple is now the same as in a single tuple. - - For runtime users this mean that the implication can be breaking depending on the pipeline used - in the runtime. - - For runtime developers this breaks usage of `TransactionExtension::validate`. - When calling `TransactionExtension::validate` the implication must now implement `Implication` - trait, you can use `TxBaseImplication` to wrap the type and use it as the base implication. - E.g. instead of `&(extension_version, call),` you can write `&TxBaseImplication((extension_version, call))`. - -crates: -- name: sp-runtime - bump: major -- name: pallet-skip-feeless-payment - bump: major -- name: frame-system - bump: major diff --git a/prdoc/stable2412/pr_4834.prdoc b/prdoc/stable2412/pr_4834.prdoc deleted file mode 100644 index b7c8b15cb073..000000000000 --- a/prdoc/stable2412/pr_4834.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -title: "xcm-executor: take delivery fee from transferred assets if necessary" - -doc: - - audience: Runtime Dev - description: | - In asset transfers, as a last resort, XCM delivery fees are taken from - transferred assets rather than failing the transfer. 
- -crates: - - name: staging-xcm-executor - bump: patch - - name: snowbridge-router-primitives - bump: patch - - name: snowbridge-pallet-inbound-queue - bump: patch diff --git a/prdoc/stable2412/pr_5311.prdoc b/prdoc/stable2412/pr_5311.prdoc deleted file mode 100644 index 07affa5cb2ee..000000000000 --- a/prdoc/stable2412/pr_5311.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -title: No-op Impl Polling Trait - -doc: - - audience: Runtime Dev - description: | - Provide a NoOp implementation of the Polling trait for unit where the trait is defined and skiping benchmarks that necessitate it's definition. - -crates: - - name: pallet-core-fellowship - bump: minor - - name: pallet-ranked-collective - bump: minor - - name: pallet-salary - bump: minor - - name: frame-support - bump: minor diff --git a/prdoc/stable2412/pr_5732.prdoc b/prdoc/stable2412/pr_5732.prdoc deleted file mode 100644 index 6f3f9b8a1668..000000000000 --- a/prdoc/stable2412/pr_5732.prdoc +++ /dev/null @@ -1,29 +0,0 @@ -title: Expose the unstable metadata v16 -doc: -- audience: Node Dev - description: | - This PR exposes the *unstable* metadata V16. The metadata is exposed under the unstable u32::MAX number. - Developers can start experimenting with the new features of the metadata v16. *Please note that this metadata is under development and expect breaking changes until stabilization.* - The `ExtrinsicMetadata` trait receives a breaking change. Its associated type `VERSION` is rename to `VERSIONS` and now supports a constant static list of metadata versions. - The versions implemented for `UncheckedExtrinsic` are v4 (legacy version) and v5 (new version). - For metadata collection, it is assumed that all `TransactionExtensions` are under version 0. 
- -crates: - - name: sp-metadata-ir - bump: major - - name: frame-support-procedural - bump: patch - - name: frame-support - bump: minor - - name: frame-support-test - bump: major - - name: frame-metadata-hash-extension - bump: patch - - name: substrate-wasm-builder - bump: minor - - name: pallet-revive - bump: minor - - name: sp-runtime - bump: major - - name: frame-benchmarking-cli - bump: patch diff --git a/prdoc/stable2412/pr_5997.prdoc b/prdoc/stable2412/pr_5997.prdoc deleted file mode 100644 index 6bac36a44586..000000000000 --- a/prdoc/stable2412/pr_5997.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Implement archive_unstable_storageDiff method - -doc: - - audience: Node Dev - description: | - This PR implements the `archive_unstable_storageDiff` rpc-v2 method. - Developers can use this method to fetch the storage differences - between two blocks. This is useful for oracles and archive nodes. - For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md. - -crates: - - name: sc-rpc-spec-v2 - bump: major - - name: sc-service - bump: patch diff --git a/prdoc/stable2412/pr_6304.prdoc b/prdoc/stable2412/pr_6304.prdoc deleted file mode 100644 index 1c8f1bb25deb..000000000000 --- a/prdoc/stable2412/pr_6304.prdoc +++ /dev/null @@ -1,45 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: XCMv5 - Add ExecuteWithOrigin instruction - -doc: - - audience: [Runtime User, Runtime Dev] - description: | - Added a new instruction to XCMv5, ExecuteWithOrigin, that allows you to specify an interior origin - and a set of instructions that will be executed using that origin. 
- The origins you can choose are `None` to clear it during the execution of the inner instructions, - or `Some(InteriorLocation)` to descend into an interior location. - These two options mimic the behaviour of `ClearOrigin` and `DescendOrigin` respectively. - Crucially, this instruction goes back to the previous origin once the execution of those inner - instructions end. - This allows use-cases like a parent location paying fees with one interior location, fetching funds - with another, claiming assets on behalf of many different ones, etc. - -crates: - - name: staging-xcm - bump: major - - name: staging-xcm-executor - bump: minor - - name: staging-xcm-builder - bump: minor - - name: asset-hub-rococo-runtime - bump: minor - - name: asset-hub-westend-runtime - bump: minor - - name: bridge-hub-rococo-runtime - bump: minor - - name: bridge-hub-westend-runtime - bump: minor - - name: people-rococo-runtime - bump: minor - - name: people-westend-runtime - bump: minor - - name: coretime-rococo-runtime - bump: minor - - name: coretime-westend-runtime - bump: minor - - name: rococo-runtime - bump: minor - - name: westend-runtime - bump: minor diff --git a/prdoc/stable2412/pr_6323.prdoc b/prdoc/stable2412/pr_6323.prdoc deleted file mode 100644 index ec632a14f946..000000000000 --- a/prdoc/stable2412/pr_6323.prdoc +++ /dev/null @@ -1,32 +0,0 @@ -title: add `TransactionSource` to `TransactionExtension::validate` -doc: -- audience: Runtime Dev - description: | - Add a the source of the extrinsic as an argument in `TransactionExtension::validate`. - The transaction source can be useful for transactions that should only be valid if it comes from the node. For example from offchain worker. - To update the current code. 
The transaction source can simply be ignored: `_source: TransactionSource` - - -crates: -- name: sp-runtime - bump: major -- name: bridge-runtime-common - bump: patch -- name: frame-system - bump: patch -- name: pallet-transaction-payment - bump: patch -- name: polkadot-runtime-common - bump: patch -- name: pallet-sudo - bump: patch -- name: pallet-verify-signature - bump: patch -- name: pallet-asset-tx-payment - bump: patch -- name: pallet-bridge-relayers - bump: patch -- name: pallet-asset-conversion-tx-payment - bump: patch -- name: pallet-skip-feeless-payment - bump: patch diff --git a/prdoc/stable2412/pr_6418.prdoc b/prdoc/stable2412/pr_6418.prdoc deleted file mode 100644 index 6696b54024b9..000000000000 --- a/prdoc/stable2412/pr_6418.prdoc +++ /dev/null @@ -1,151 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Follow up work on TransactionExtension - fix weights and clean up UncheckedExtrinsic - -doc: - - audience: Runtime Dev - description: | - This PR removes the redundant extension version byte from the signed v4 extrinsic, previously - unused and defaulted to 0. The extension version byte is also made to be part of the inherited - implication handed to extensions in General transactions. Also, some system extensions - benchmarks were adjusted through whitelisting to not count the reads for frequently read - storage keys. 
- -crates: - - name: node-testing - bump: patch - - name: pallet-example-offchain-worker - bump: patch - - name: sp-runtime - bump: major - - name: substrate-test-utils - bump: patch - - name: pallet-alliance - bump: patch - - name: pallet-asset-conversion - bump: patch - - name: pallet-asset-conversion-ops - bump: patch - - name: pallet-asset-rate - bump: patch - - name: pallet-assets - bump: patch - - name: pallet-authorship - bump: patch - - name: pallet-bags-list - bump: patch - - name: pallet-balances - bump: patch - - name: pallet-beefy-mmr - bump: patch - - name: frame-benchmarking - bump: patch - - name: pallet-bounties - bump: patch - - name: pallet-broker - bump: patch - - name: pallet-child-bounties - bump: patch - - name: pallet-collective - bump: patch - - name: pallet-contracts - bump: patch - - name: pallet-conviction-voting - bump: patch - - name: pallet-core-fellowship - bump: patch - - name: pallet-democracy - bump: patch - - name: pallet-election-provider-multi-phase - bump: patch - - name: pallet-elections-phragmen - bump: patch - - name: pallet-fast-unstake - bump: patch - - name: pallet-glutton - bump: patch - - name: pallet-identity - bump: patch - - name: pallet-im-online - bump: patch - - name: pallet-indices - bump: patch - - name: pallet-lottery - bump: patch - - name: pallet-membership - bump: patch - - name: pallet-message-queue - bump: patch - - name: pallet-migrations - bump: patch - - name: pallet-multisig - bump: patch - - name: pallet-nft-fractionalization - bump: patch - - name: pallet-nfts - bump: patch - - name: pallet-nis - bump: patch - - name: pallet-nomination-pools - bump: patch - - name: pallet-parameters - bump: patch - - name: pallet-preimage - bump: patch - - name: pallet-proxy - bump: patch - - name: pallet-ranked-collective - bump: patch - - name: pallet-recovery - bump: patch - - name: pallet-referenda - bump: patch - - name: pallet-remark - bump: patch - - name: pallet-revive - bump: patch - - name: pallet-safe-mode 
- bump: patch - - name: pallet-salary - bump: patch - - name: pallet-scheduler - bump: patch - - name: pallet-session - bump: patch - - name: pallet-society - bump: patch - - name: pallet-staking - bump: patch - - name: pallet-state-trie-migration - bump: patch - - name: pallet-sudo - bump: patch - - name: frame-support - bump: patch - - name: pallet-timestamp - bump: patch - - name: pallet-tips - bump: patch - - name: pallet-asset-conversion-tx-payment - bump: patch - - name: pallet-transaction-payment - bump: patch - - name: pallet-transaction-storage - bump: patch - - name: pallet-treasury - bump: patch - - name: pallet-tx-pause - bump: patch - - name: pallet-uniques - bump: patch - - name: pallet-utility - bump: patch - - name: pallet-verify-signature - bump: patch - - name: pallet-vesting - bump: patch - - name: pallet-whitelist - bump: patch - - name: sp-runtime - bump: major diff --git a/prdoc/stable2412/pr_6454.prdoc b/prdoc/stable2412/pr_6454.prdoc deleted file mode 100644 index 3fd3e39db604..000000000000 --- a/prdoc/stable2412/pr_6454.prdoc +++ /dev/null @@ -1,7 +0,0 @@ -title: 'rpc server: fix ipv6 host filter for localhost' -doc: -- audience: Node Operator - description: "This PR fixes that ipv6 connections to localhost was faulty rejected by the host filter because only [::1] was allowed" -crates: -- name: sc-rpc-server - bump: minor diff --git a/prdoc/stable2412/pr_6484.prdoc b/prdoc/stable2412/pr_6484.prdoc deleted file mode 100644 index c212692e6ab4..000000000000 --- a/prdoc/stable2412/pr_6484.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Update litep2p network backend to version 0.8.1 - -doc: - - audience: [ Node Dev, Node Operator ] - description: | - Release 0.8.1 of litep2p includes critical fixes to further enhance the stability and performance of the litep2p network backend. 
- -crates: - - name: sc-network - bump: patch diff --git a/prdoc/stable2412/pr_6505.prdoc b/prdoc/stable2412/pr_6505.prdoc deleted file mode 100644 index ae00dd17fed5..000000000000 --- a/prdoc/stable2412/pr_6505.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: '[pallet-broker] Fix auto renew benchmarks' -doc: -- audience: Runtime Dev - description: |- - Fix the broker pallet auto-renew benchmarks which have been broken since #4424, yielding `Weightless` due to some prices being set too low, as reported in #6474. - - Upon further investigation it turned out that the auto-renew contribution to `rotate_sale` was always failing but the error was mapped. This is also fixed at the cost of a bit of setup overhead. -crates: -- name: pallet-broker - bump: patch -- name: coretime-rococo-runtime - bump: patch -- name: coretime-westend-runtime - bump: patch diff --git a/prdoc/stable2412/pr_6536.prdoc b/prdoc/stable2412/pr_6536.prdoc deleted file mode 100644 index 676b5c131f17..000000000000 --- a/prdoc/stable2412/pr_6536.prdoc +++ /dev/null @@ -1,24 +0,0 @@ -title: Bridges testing improvements -doc: -- audience: Runtime Dev - description: |- - This PR includes: - - Refactored integrity tests to support standalone deployment of `pallet-bridge-messages`. - - Refactored the `open_and_close_bridge_works` test case to support multiple scenarios, such as: - 1. A local chain opening a bridge. - 2. Sibling parachains opening a bridge. - 3. The relay chain opening a bridge. - - Previously, we added instance support for `pallet-bridge-relayer` but overlooked updating the `DeliveryConfirmationPaymentsAdapter`. 
-crates: -- name: bridge-runtime-common - bump: patch -- name: pallet-bridge-relayers - bump: patch -- name: bridge-hub-rococo-runtime - bump: patch -- name: bridge-hub-westend-runtime - bump: patch -- name: bridge-hub-test-utils - bump: major -- name: parachains-runtimes-test-utils - bump: major diff --git a/prdoc/stable2412/pr_6566.prdoc b/prdoc/stable2412/pr_6566.prdoc deleted file mode 100644 index bbd48b799538..000000000000 --- a/prdoc/stable2412/pr_6566.prdoc +++ /dev/null @@ -1,45 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: XCMv5 - SetHints instruction - -doc: - - audience: Runtime Dev - description: | - Implementation of fellowship RFC 107. - The new SetHints instruction is a repackaging of SetAssetClaimer that also allows future - "hints" which alter the default behaviour of the executor. - The AllowTopLevelPaidExecutionFrom barrier allows this instruction between WithdrawAsset and - BuyExecution/PayFees to configure things before the actual meat of the program. 
- -crates: - - name: asset-hub-rococo-runtime - bump: major - - name: asset-hub-westend-runtime - bump: major - - name: bridge-hub-rococo-runtime - bump: major - - name: bridge-hub-westend-runtime - bump: major - - name: coretime-rococo-runtime - bump: major - - name: coretime-westend-runtime - bump: major - - name: people-rococo-runtime - bump: major - - name: people-westend-runtime - bump: major - - name: rococo-runtime - bump: major - - name: westend-runtime - bump: major - - name: pallet-xcm-benchmarks - bump: major - - name: xcm-procedural - bump: minor - - name: staging-xcm - bump: major - - name: staging-xcm-builder - bump: major - - name: staging-xcm-executor - bump: major diff --git a/prdoc/stable2412/pr_6588.prdoc b/prdoc/stable2412/pr_6588.prdoc deleted file mode 100644 index bf44b2ed3784..000000000000 --- a/prdoc/stable2412/pr_6588.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: "rpc server: fix subscription id_provider being reset to default one" - -doc: - - audience: Node Dev - description: | - The modification ensures that the id_provider variable is cloned instead of taken, which can help prevent issues related id provider being reset to the default. - - -crates: - - name: sc-rpc-server - bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6603.prdoc b/prdoc/stable2412/pr_6603.prdoc deleted file mode 100644 index 20c5e7294dfa..000000000000 --- a/prdoc/stable2412/pr_6603.prdoc +++ /dev/null @@ -1,16 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Always provide main protocol name in litep2p responses - -doc: - - audience: [ Node Dev, Node Operator ] - description: | - This PR aligns litep2p behavior with libp2p. 
Previously, litep2p network backend - would provide the actual negotiated request-response protocol that produced a - response message. After this PR, only the main protocol name is reported to other - subsystems. - -crates: - - name: sc-network - bump: patch diff --git a/prdoc/stable2412/pr_6643.prdoc b/prdoc/stable2412/pr_6643.prdoc deleted file mode 100644 index c111f6356519..000000000000 --- a/prdoc/stable2412/pr_6643.prdoc +++ /dev/null @@ -1,47 +0,0 @@ -title: Added fallback_max_weight to Transact for sending messages to V4 chains -doc: -- audience: Runtime Dev - description: |- - Removing the `require_weight_at_most` parameter in V5 Transact introduced a problem when converting a message from V5 to V4 to send to chains that didn't upgrade yet. - The local chain doesn't know how to decode calls for remote chains so it can't automatically populate `require_weight_at_most` required by V4 Transact. - To fix this, XCM v5 Transact now also takes a `fallback_max_weight: Option` parameter. - This can be set to `None` if the instruction is not meant to be sent to chains running XCM versions lower than V5. - If set to `Some(weight)`, a subsequent conversion to V4 will result in `Transact { require_weight_at_most: weight, .. }`. - The plan is to remove this workaround in V6 since there will be a good conversion path from V6 to V5. 
-crates: -- name: snowbridge-router-primitives - bump: major -- name: emulated-integration-tests-common - bump: major -- name: asset-hub-rococo-runtime - bump: major -- name: asset-hub-westend-runtime - bump: major -- name: asset-test-utils - bump: major -- name: bridge-hub-rococo-runtime - bump: major -- name: bridge-hub-westend-runtime - bump: major -- name: coretime-rococo-runtime - bump: major -- name: coretime-westend-runtime - bump: major -- name: people-rococo-runtime - bump: major -- name: people-westend-runtime - bump: major -- name: parachains-runtimes-test-utils - bump: major -- name: polkadot-runtime-parachains - bump: major -- name: rococo-runtime - bump: major -- name: westend-runtime - bump: major -- name: staging-xcm - bump: major -- name: staging-xcm-builder - bump: major -- name: staging-xcm-executor - bump: major diff --git a/prdoc/stable2412/pr_6645.prdoc b/prdoc/stable2412/pr_6645.prdoc deleted file mode 100644 index f033cadc0b6e..000000000000 --- a/prdoc/stable2412/pr_6645.prdoc +++ /dev/null @@ -1,14 +0,0 @@ -title: 'xcm: fix local/remote exports when inner routers return `NotApplicable`' -doc: -- audience: Runtime Dev - description: |- - Resolved a bug in the `local/remote exporters` used for bridging. Previously, they consumed `dest` and `msg` without returning them when inner routers/exporters failed with `NotApplicable`. This PR ensures compliance with the [`SendXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v5/traits.rs#L449-L450) and [`ExportXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-executor/src/traits/export.rs#L44-L45) traits. 
-crates: -- name: staging-xcm-builder - bump: patch -- name: polkadot - bump: none -- name: staging-xcm - bump: none -- name: staging-xcm-executor - bump: none diff --git a/prdoc/stable2412/pr_6646.prdoc b/prdoc/stable2412/pr_6646.prdoc deleted file mode 100644 index 4dcda8d41bda..000000000000 --- a/prdoc/stable2412/pr_6646.prdoc +++ /dev/null @@ -1,19 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: OmniNode --dev flag starts node with manual seal - -doc: - - audience: [ Runtime Dev, Node Dev ] - description: | - `polkadot-omni-node` lib supports `--dev` flag now by allowing also to pass over a chain spec, - and starts the node with manual seal. It will seal the node at each `dev_block_time` milliseconds, - which can be set via `--dev-block-time`, and if not set will default to `3000ms`. - -crates: - - name: sc-cli - bump: patch - - name: polkadot-omni-node-lib - bump: patch - - name: polkadot-omni-node - bump: patch diff --git a/prdoc/stable2412/pr_6652.prdoc b/prdoc/stable2412/pr_6652.prdoc deleted file mode 100644 index a303311e138f..000000000000 --- a/prdoc/stable2412/pr_6652.prdoc +++ /dev/null @@ -1,13 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: "rpc server: re-use server builder per rpc interface" - -doc: - - audience: Node Dev - description: | - This changes that the RPC server builder is re-used for each RPC interface which is more efficient than to build it for every connection. 
- -crates: - - name: sc-rpc-server - bump: patch diff --git a/prdoc/stable2412/pr_6677.prdoc b/prdoc/stable2412/pr_6677.prdoc deleted file mode 100644 index c6766889e68d..000000000000 --- a/prdoc/stable2412/pr_6677.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: 'chore: Update litep2p to v0.8.2' -doc: -- audience: Node Dev - description: |- - This includes a critical fix for debug release versions of litep2p (which are running in Kusama as validators). - - While at it, have stopped the oncall pain of alerts around `incoming_connections_total`. We can rethink the metric expose of litep2p in Q1. - -crates: -- name: sc-network - bump: minor diff --git a/prdoc/stable2412/pr_6690.prdoc b/prdoc/stable2412/pr_6690.prdoc deleted file mode 100644 index 0e4a2437ef96..000000000000 --- a/prdoc/stable2412/pr_6690.prdoc +++ /dev/null @@ -1,17 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Fix Possible bug, Vote import failed after aggression is enabled - -doc: - - audience: Node Dev - description: | - Fix the appearance of Possible bug: Vote import failed after aggression is enabled, the log itself is - harmless because approval gets imported anyway and aggression is able to distribute it, nevertheless - is something that can be easily be fixed by picking the highest required routing possible. 
- -crates: - - name: polkadot-node-network-protocol - bump: minor - - name: polkadot-approval-distribution - bump: minor diff --git a/prdoc/stable2412/pr_6696.prdoc b/prdoc/stable2412/pr_6696.prdoc deleted file mode 100644 index c5c73f831886..000000000000 --- a/prdoc/stable2412/pr_6696.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Make approval-distribution aggression a bit more robust and less spammy - -doc: - - audience: Node Dev - description: | - The problem with the current implementation of approval-distribution aggression is that is too spammy, - and can overload the nodes, so make it less spammy by moving back the moment we trigger L2 aggression - and make resend enable only for the latest unfinalized block. - -crates: - - name: polkadot-approval-distribution - bump: minor diff --git a/prdoc/stable2412/pr_6729.prdoc b/prdoc/stable2412/pr_6729.prdoc deleted file mode 100644 index 9eaa67363c9a..000000000000 --- a/prdoc/stable2412/pr_6729.prdoc +++ /dev/null @@ -1,15 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Fix order of resending messages after restart - -doc: - - audience: Node Dev - description: | - At restart when dealing with a coalesced approval we might end up in a situation where we sent to - approval-distribution the approval before all assignments covering it, in that case, the approval - is ignored and never distribute, which will lead to no-shows. 
- -crates: - - name: polkadot-node-core-approval-voting - bump: minor diff --git a/prdoc/stable2412/pr_6742.prdoc b/prdoc/stable2412/pr_6742.prdoc deleted file mode 100644 index 92c3755a3c28..000000000000 --- a/prdoc/stable2412/pr_6742.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: Update litep2p backend to v0.8.3 -doc: -- audience: Node Dev - description: |- - This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. - While at it, have downgraded a log message from litep2p. - -crates: -- name: sc-network - bump: patch - diff --git a/prdoc/stable2412/pr_6760.prdoc b/prdoc/stable2412/pr_6760.prdoc deleted file mode 100644 index 8224b72fb0a4..000000000000 --- a/prdoc/stable2412/pr_6760.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: 'chainHead: Always report discarded items for storage operations' -doc: -- audience: [Node Dev, Node Operator] - description: |- - This PR ensures that substrate always reports discarded items as zero. - This is needed to align with the rpc-v2 spec -crates: -- name: sc-rpc-spec-v2 - bump: patch diff --git a/prdoc/stable2412/pr_6781.prdoc b/prdoc/stable2412/pr_6781.prdoc deleted file mode 100644 index 8090be420341..000000000000 --- a/prdoc/stable2412/pr_6781.prdoc +++ /dev/null @@ -1,28 +0,0 @@ -title: Bridges - revert-back congestion mechanism - -doc: -- audience: Runtime Dev - description: |- - With [permissionless lanes PR#4949](https://github.com/paritytech/polkadot-sdk/pull/4949), the congestion mechanism based on sending `Transact(report_bridge_status(is_congested))` from `pallet-xcm-bridge-hub` to `pallet-xcm-bridge-hub-router` was replaced with a congestion mechanism that relied on monitoring XCMP queues. However, this approach could cause issues, such as suspending the entire XCMP queue instead of isolating the affected bridge. This PR reverts back to using `report_bridge_status` as before. 
- -crates: -- name: pallet-xcm-bridge-hub-router - bump: patch -- name: pallet-xcm-bridge-hub - bump: patch -- name: bp-xcm-bridge-hub - bump: patch -- name: bp-asset-hub-rococo - bump: patch -- name: bp-asset-hub-westend - bump: patch -- name: asset-hub-rococo-runtime - bump: patch -- name: asset-hub-westend-runtime - bump: patch -- name: asset-test-utils - bump: patch -- name: bridge-hub-rococo-runtime - bump: patch -- name: bridge-hub-westend-runtime - bump: patch diff --git a/prdoc/stable2412/pr_6814.prdoc b/prdoc/stable2412/pr_6814.prdoc deleted file mode 100644 index 4edbf2f8ed28..000000000000 --- a/prdoc/stable2412/pr_6814.prdoc +++ /dev/null @@ -1,32 +0,0 @@ -title: Add aliasers to westend chains -doc: -- audience: Runtime Dev - description: |- - `InitiateTransfer`, the new instruction introduced in XCMv5, allows preserving the origin after a cross-chain transfer via the usage of the `AliasOrigin` instruction. The receiving chain needs to be configured to allow such this instruction to have its intended effect and not just throw an error. - - In this PR, I add the alias rules specified in the [RFC for origin preservation](https://github.com/polkadot-fellows/RFCs/blob/main/text/0122-alias-origin-on-asset-transfers.md) to westend chains so we can test these scenarios in the testnet. - - The new scenarios include: - - Sending a cross-chain transfer from one system chain to another and doing a Transact on the same message (1 hop) - - Sending a reserve asset transfer from one chain to another going through asset hub and doing Transact on the same message (2 hops) - - The updated chains are: - - Relay: added `AliasChildLocation` - - Collectives: added `AliasChildLocation` and `AliasOriginRootUsingFilter` - - People: added `AliasChildLocation` and `AliasOriginRootUsingFilter` - - Coretime: added `AliasChildLocation` and `AliasOriginRootUsingFilter` - - AssetHub already has `AliasChildLocation` and doesn't need the other config item. 
- BridgeHub is not intended to be used by end users so I didn't add any config item. - Only added `AliasChildOrigin` to the relay since we intend for it to be used less. -crates: -- name: westend-runtime - bump: patch -- name: collectives-westend-runtime - bump: patch -- name: people-westend-runtime - bump: patch -- name: coretime-westend-runtime - bump: patch -- name: pallet-xcm-benchmarks - bump: patch diff --git a/prdoc/stable2412/pr_6860.prdoc b/prdoc/stable2412/pr_6860.prdoc deleted file mode 100644 index 76b460ce52dd..000000000000 --- a/prdoc/stable2412/pr_6860.prdoc +++ /dev/null @@ -1,10 +0,0 @@ -title: Update litep2p network backend to v0.8.4 - -doc: - - audience: [ Node Dev, Node Operator ] - description: | - This PR updates the Litep2p network backend to version 0.8.4 - -crates: - - name: sc-network - bump: patch diff --git a/prdoc/stable2412/pr_6863.prdoc b/prdoc/stable2412/pr_6863.prdoc deleted file mode 100644 index 0dd416e5e438..000000000000 --- a/prdoc/stable2412/pr_6863.prdoc +++ /dev/null @@ -1,9 +0,0 @@ -title: Update merkleized-metadata to 0.2.0 -doc: -- audience: Node Dev - description: |- - 0.1.2 was yanked as it was breaking semver. -crates: - - name: substrate-wasm-builder - bump: patch - validate: false diff --git a/prdoc/stable2412/pr_6864.prdoc b/prdoc/stable2412/pr_6864.prdoc deleted file mode 100644 index 6d6c84e22da4..000000000000 --- a/prdoc/stable2412/pr_6864.prdoc +++ /dev/null @@ -1,18 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: Fix approval-voting canonicalize off by one - -doc: - - audience: Node Dev - description: | - The approval-voting canonicalize was off by one, which lead to blocks being - cleaned up every other 2 blocks. Normally, this is not an issue, but on restart - we might end up sending NewBlocks to approval-distribution with finalized blocks. 
- This would be problematic in the case were finalization was already lagging before - restart, so after restart approval-distribution will trigger aggression on the wrong - already finalized block. - -crates: - - name: polkadot-node-core-approval-voting - bump: minor diff --git a/prdoc/stable2412/pr_6885.prdoc b/prdoc/stable2412/pr_6885.prdoc deleted file mode 100644 index 986d76962289..000000000000 --- a/prdoc/stable2412/pr_6885.prdoc +++ /dev/null @@ -1,11 +0,0 @@ -title: 'Omni-node: Detect pending code in storage and send go ahead signal in dev-mode.' -doc: -- audience: Runtime Dev - description: |- - When using the polkadot-omni-node with manual seal (`--dev-block-time`), it is now possible to perform runtime - upgrades. The node will detect the pending validation code and send a go-ahead signal to the parachain. -crates: -- name: cumulus-client-parachain-inherent - bump: major -- name: polkadot-omni-node-lib - bump: patch diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py index ae3873180553..8326909c3449 100644 --- a/scripts/generate-umbrella.py +++ b/scripts/generate-umbrella.py @@ -120,8 +120,6 @@ def main(path, version): "edition": { "workspace": True }, "authors": { "workspace": True }, "description": "Polkadot SDK umbrella crate.", - "homepage": { "workspace": True }, - "repository": { "workspace": True }, "license": "Apache-2.0", "metadata": { "docs": { "rs": { "features": ["runtime-full", "node"], diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index d962030d0225..237643cfa392 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -4,7 +4,7 @@ {% for file in prdoc -%} {% for doc_item in file.content.doc %} -{%- if doc_item.audience is containing(env.TARGET_AUDIENCE) %} +{%- if doc_item.audience == env.TARGET_AUDIENCE %} #### [#{{file.doc_filename.number}}]: {{ file.content.title }} {{ doc_item.description }} {% endif -%} diff 
--git a/substrate/.config/nextest.toml b/substrate/.config/nextest.toml new file mode 100644 index 000000000000..eb0ed09cad92 --- /dev/null +++ b/substrate/.config/nextest.toml @@ -0,0 +1,124 @@ +# This is the default config used by nextest. It is embedded in the binary at +# build time. It may be used as a template for .config/nextest.toml. + +[store] +# The directory under the workspace root at which nextest-related files are +# written. Profile-specific storage is currently written to dir/. +dir = "target/nextest" + +# This section defines the default nextest profile. Custom profiles are layered +# on top of the default profile. +[profile.default] +# "retries" defines the number of times a test should be retried. If set to a +# non-zero value, tests that succeed on a subsequent attempt will be marked as +# non-flaky. Can be overridden through the `--retries` option. +# Examples +# * retries = 3 +# * retries = { backoff = "fixed", count = 2, delay = "1s" } +# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } +retries = 5 + +# The number of threads to run tests with. Supported values are either an integer or +# the string "num-cpus". Can be overridden through the `--test-threads` option. +test-threads = "num-cpus" + +# The number of threads required for each test. This is generally used in overrides to +# mark certain tests as heavier than others. However, it can also be set as a global parameter. +threads-required = 1 + +# Show these test statuses in the output. +# +# The possible values this can take are: +# * none: no output +# * fail: show failed (including exec-failed) tests +# * retry: show flaky and retried tests +# * slow: show slow tests +# * pass: show passed tests +# * skip: show skipped tests (most useful for CI) +# * all: all of the above +# +# Each value includes all the values above it; for example, "slow" includes +# failed and retried tests. 
+# +# Can be overridden through the `--status-level` flag. +status-level = "pass" + +# Similar to status-level, show these test statuses at the end of the run. +final-status-level = "flaky" + +# "failure-output" defines when standard output and standard error for failing tests are produced. +# Accepted values are +# * "immediate": output failures as soon as they happen +# * "final": output failures at the end of the test run +# * "immediate-final": output failures as soon as they happen and at the end of +# the test run; combination of "immediate" and "final" +# * "never": don't output failures at all +# +# For large test suites and CI it is generally useful to use "immediate-final". +# +# Can be overridden through the `--failure-output` option. +failure-output = "immediate" + +# "success-output" controls production of standard output and standard error on success. This should +# generally be set to "never". +success-output = "never" + +# Cancel the test run on the first failure. For CI runs, consider setting this +# to false. +fail-fast = true + +# Treat a test that takes longer than the configured 'period' as slow, and print a message. +# See for more information. +# +# Optional: specify the parameter 'terminate-after' with a non-zero integer, +# which will cause slow tests to be terminated after the specified number of +# periods have passed. +# Example: slow-timeout = { period = "60s", terminate-after = 2 } +slow-timeout = { period = "60s" } + +# Treat a test as leaky if after the process is shut down, standard output and standard error +# aren't closed within this duration. +# +# This usually happens in case of a test that creates a child process and lets it inherit those +# handles, but doesn't clean the child process up (especially when it fails). +# +# See for more information. +leak-timeout = "100ms" + +[profile.default.junit] +# Output a JUnit report into the given file inside 'store.dir/'. +# If unspecified, JUnit is not written out. 
+ +path = "junit.xml" + +# The name of the top-level "report" element in JUnit report. If aggregating +# reports across different test runs, it may be useful to provide separate names +# for each report. +report-name = "substrate" + +# Whether standard output and standard error for passing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +store-success-output = false + +# Whether standard output and standard error for failing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +# +# Note that if a description can be extracted from the output, it is always stored in the +# element. +store-failure-output = true + +# This profile is activated if MIRI_SYSROOT is set. +[profile.default-miri] +# Miri tests take up a lot of memory, so only run 1 test at a time by default. +test-threads = 1 + +# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple +# simultaneous invocations clobbering each other. +[test-groups] +serial-integration = { max-threads = 1 } + +# Running UI tests sequentially +# More info can be found here: https://github.com/paritytech/ci_cd/issues/754 +[[profile.default.overrides]] +filter = 'test(/(^ui$|_ui|ui_)/)' +test-group = 'serial-integration' diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs index b174823b3840..0f26fae1d8f1 100644 --- a/substrate/.maintain/frame-umbrella-weight-template.hbs +++ b/substrate/.maintain/frame-umbrella-weight-template.hbs @@ -32,7 +32,7 @@ pub trait WeightInfo { /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} +{{#if (eq pallet "frame_system")}} impl WeightInfo for SubstrateWeight { {{else}} impl WeightInfo for SubstrateWeight { diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index 83f7b82cd2b5..8c6556da682c 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -16,32 +16,31 @@ workspace = true [dependencies] array-bytes = { workspace = true, default-features = true } -async-trait = { workspace = true } clap = { features = ["derive"], workspace = true } -derive_more = { features = ["display"], workspace = true } -fs_extra = { workspace = true } -futures = { features = ["thread-pool"], workspace = true } -hash-db = { workspace = true, default-features = true } -kitchensink-runtime = { workspace = true } -kvdb = { workspace = true } -kvdb-rocksdb = { workspace = true } log = { workspace = true, default-features = true } node-primitives = { workspace = true, default-features = true } node-testing = { workspace = true } -parity-db = { workspace = true } -rand = { features = ["small_rng"], workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } sc-client-api = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } +derive_more = { features = ["display"], workspace = true } +kvdb = { workspace = true } +kvdb-rocksdb = { workspace = true } +sp-trie = { 
workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } sp-timestamp = { workspace = true } sp-tracing = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } +hash-db = { workspace = true, default-features = true } tempfile = { workspace = true } +fs_extra = { workspace = true } +rand = { features = ["small_rng"], workspace = true, default-features = true } +parity-db = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index 22129c6a1d69..bed6e3d914c2 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -24,14 +24,14 @@ //! DO NOT depend on user input). Thus transaction generation should be //! based on randomized data. 
+use futures::Future; use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; -use async_trait::async_trait; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource, - TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; @@ -224,47 +224,54 @@ impl ReadyTransactions for TransactionsIterator { fn report_invalid(&mut self, _tx: &Self::Item) {} } -#[async_trait] impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; type Hash = node_primitives::Hash; type InPoolTransaction = PoolTransaction; type Error = sc_transaction_pool_api::error::Error; - /// Asynchronously imports a bunch of unverified transactions to the pool. - async fn submit_at( + /// Returns a future that imports a bunch of unverified transactions to the pool. + fn submit_at( &self, _at: Self::Hash, _source: TransactionSource, _xts: Vec>, - ) -> Result>, Self::Error> { + ) -> PoolFuture>, Self::Error> { unimplemented!() } - /// Asynchronously imports one unverified transaction to the pool. - async fn submit_one( + /// Returns a future that imports one unverified transaction to the pool. 
+ fn submit_one( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> Result, Self::Error> { + ) -> PoolFuture, Self::Error> { unimplemented!() } - async fn submit_and_watch( + fn submit_and_watch( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> Result>>, Self::Error> { + ) -> PoolFuture>>, Self::Error> { unimplemented!() } - async fn ready_at( + fn ready_at( &self, _at: Self::Hash, - ) -> Box> + Send> { - Box::new(TransactionsIterator(self.0.clone().into_iter())) + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + > { + let iter: Box> + Send> = + Box::new(TransactionsIterator(self.0.clone().into_iter())); + Box::pin(futures::future::ready(iter)) } fn ready(&self) -> Box> + Send> { @@ -299,11 +306,18 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { unimplemented!() } - async fn ready_at_with_timeout( + fn ready_at_with_timeout( &self, _at: Self::Hash, _timeout: std::time::Duration, - ) -> Box> + Send> { + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send + + '_, + >, + > { unimplemented!() } } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 9e063ee3cde0..c179579c1885 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -40,11 +40,11 @@ crate-type = ["cdylib", "rlib"] array-bytes = { workspace = true, default-features = true } clap = { features = ["derive"], optional = true, workspace = true } codec = { workspace = true, default-features = true } -futures = { workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = 
true } subxt-signer = { workspace = true, features = ["unstable-eth"] } @@ -135,32 +135,32 @@ polkadot-sdk = { features = [ # Shared code between the staging node and kitchensink runtime: kitchensink-runtime = { workspace = true } -node-inspect = { optional = true, workspace = true, default-features = true } -node-primitives = { workspace = true, default-features = true } node-rpc = { workspace = true } +node-primitives = { workspace = true, default-features = true } +node-inspect = { optional = true, workspace = true, default-features = true } [dev-dependencies] -assert_cmd = { workspace = true } -criterion = { features = ["async_tokio"], workspace = true, default-features = true } futures = { workspace = true } +tempfile = { workspace = true } +assert_cmd = { workspace = true } nix = { features = ["signal"], workspace = true } -platforms = { workspace = true } -pretty_assertions.workspace = true regex = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +platforms = { workspace = true } soketto = { workspace = true } -sp-keyring = { workspace = true } -tempfile = { workspace = true } +criterion = { features = ["async_tokio"], workspace = true, default-features = true } tokio = { features = ["macros", "parking_lot", "time"], workspace = true, default-features = true } tokio-util = { features = ["compat"], workspace = true } wait-timeout = { workspace = true } wat = { workspace = true } +serde_json = { workspace = true, default-features = true } +scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } +sp-keyring = { workspace = true } +pretty_assertions.workspace = true # These testing-only dependencies are not exported by the Polkadot-SDK crate: node-testing = { workspace = true } -sc-service-test = { workspace = true } substrate-cli-test-utils = { workspace = true } +sc-service-test = { workspace = true } 
[build-dependencies] clap = { optional = true, workspace = true } diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index 038aa2f60928..0c4a48a19260 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -427,7 +427,7 @@ fn props() -> Properties { fn eth_account(from: subxt_signer::eth::Keypair) -> AccountId32 { let mut account_id = AccountId32::new([0xEE; 32]); >::as_mut(&mut account_id)[..20] - .copy_from_slice(&from.public_key().to_account_id().as_ref()); + .copy_from_slice(&from.account_id().0); account_id } diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5f6806c235f6..008cac4ef8a8 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -513,7 +513,7 @@ pub fn new_full_base::Hash>>( Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -801,6 +801,7 @@ pub fn new_full_base::Hash>>( ); } + network_starter.start_network(); Ok(NewFullBase { task_manager, client, @@ -871,7 +872,7 @@ mod tests { use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_core::crypto::Pair; use sp_inherents::InherentDataProvider; - use sp_keyring::Sr25519Keyring; + use sp_keyring::AccountKeyring; use sp_keystore::KeystorePtr; use sp_runtime::{ generic::{self, Digest, Era, SignedPayload}, @@ -906,8 +907,8 @@ mod tests { let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(Sr25519Keyring::Bob.pair()); - let charlie = Arc::new(Sr25519Keyring::Charlie.pair()); + let bob = Arc::new(AccountKeyring::Bob.pair()); + let charlie = Arc::new(AccountKeyring::Charlie.pair()); let mut index = 0; sc_service_test::sync( diff --git a/substrate/bin/node/inspect/Cargo.toml 
b/substrate/bin/node/inspect/Cargo.toml index 0cf13bef71f1..6c8a4e59f68d 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } +thiserror = { workspace = true } sc-cli = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-service = { workspace = true } @@ -25,7 +26,6 @@ sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } -thiserror = { workspace = true } [features] runtime-benchmarks = [ diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index c8b20287650b..02f5d9a4a702 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -17,15 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { features = ["server"], workspace = true } -mmr-rpc = { workspace = true, default-features = true } node-primitives = { workspace = true, default-features = true } pallet-transaction-payment-rpc = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-babe-rpc = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } sc-consensus-beefy-rpc = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-grandpa-rpc = { workspace = true, 
default-features = true } sc-mixnet = { workspace = true, default-features = true } @@ -33,14 +34,13 @@ sc-rpc = { workspace = true, default-features = true } sc-sync-state-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 6d377cc92cce..3ad6315561d0 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -23,11 +23,11 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -log = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } +static_assertions = { workspace = true, default-features = true } +log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } sp-debug-derive = { workspace = true, features = ["force-debug"] } -static_assertions = { workspace = true, default-features = true } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { features = ["codec", "num-traits", 
"scale-info"], workspace = true } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 45ae378cc00e..5a2ff3ceb7f6 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -54,7 +54,7 @@ use frame_support::{ }, tokens::{ imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, pay::PayAssetFromAccount, - GetSalary, PayFromAccount, + Fortitude::Polite, GetSalary, PayFromAccount, Preservation::Preserve, }, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, InsideBoth, @@ -86,7 +86,6 @@ use pallet_nis::WithMaximumOf; use pallet_nomination_pools::PoolId; use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; use pallet_session::historical as pallet_session_historical; -use sp_core::U256; // Can't use `FungibleAdapter` here until Treasury pallet migrates to fungibles // use pallet_broker::TaskId; @@ -392,7 +391,6 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -480,7 +478,6 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -744,7 +741,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; - type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1653,7 +1650,6 @@ impl pallet_recovery::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_recovery::weights::SubstrateWeight; type RuntimeCall = RuntimeCall; - type BlockNumberProvider = System; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; @@ -2051,7 +2047,6 @@ impl pallet_nfts::Config for Runtime { type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; type Locker = (); - type BlockNumberProvider = frame_system::Pallet; } impl pallet_transaction_storage::Config for Runtime { @@ -3210,8 +3205,10 @@ impl_runtime_apis! { impl pallet_revive::ReviveApi for Runtime { - fn balance(address: H160) -> U256 { - Revive::evm_balance(&address) + fn balance(address: H160) -> Balance { + use frame_support::traits::fungible::Inspect; + let account = ::AddressMapper::to_account_id(&address); + Balances::reducible_balance(&account, Preserve, Polite) } fn nonce(address: H160) -> Nonce { @@ -3219,9 +3216,18 @@ impl_runtime_apis! 
{ System::account_nonce(account) } - fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> + fn eth_transact( + from: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> pallet_revive::EthContractResult { + use pallet_revive::AddressMapper; let blockweights: BlockWeights = ::BlockWeights::get(); + let origin = ::AddressMapper::to_account_id(&from); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -3230,9 +3236,15 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - tx, - blockweights.max_block, + origin, + dest, + value, + input, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), encoded_size, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -3249,7 +3261,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + storage_deposit_limit.unwrap_or(u128::MAX), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -3270,7 +3282,7 @@ impl_runtime_apis! 
{ RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + storage_deposit_limit.unwrap_or(u128::MAX), code, data, salt, diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 13477a172fb8..16112386ad7c 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -17,26 +17,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } -frame-metadata-hash-extension = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } fs_extra = { workspace = true } futures = { workspace = true } -kitchensink-runtime = { workspace = true } log = { workspace = true, default-features = true } +tempfile = { workspace = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } node-cli = { workspace = true } node-primitives = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } pallet-asset-conversion = { workspace = true, default-features = true } -pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } -pallet-asset-tx-payment = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } pallet-revive = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } pallet-skip-feeless-payment = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = 
true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } @@ -49,4 +50,3 @@ sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true } substrate-test-client = { workspace = true } -tempfile = { workspace = true } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 35f041ef0445..3812524f0b1f 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -590,6 +590,7 @@ impl BenchKeyring { preamble: Preamble::Signed( sp_runtime::MultiAddress::Id(signed), signature, + 0, tx_ext, ), function: payload.0, @@ -601,8 +602,8 @@ impl BenchKeyring { function: xt.function, } .into(), - ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), + ExtrinsicFormat::General(tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(0, tx_ext), function: xt.function, } .into(), diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index e5b0299f01a8..20497e85eab9 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -123,6 +123,7 @@ pub fn sign( preamble: sp_runtime::generic::Preamble::Signed( sp_runtime::MultiAddress::Id(signed), signature, + 0, tx_ext, ), function: payload.0, @@ -134,8 +135,8 @@ pub fn sign( function: xt.function, } .into(), - 
ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), + ExtrinsicFormat::General(tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(0, tx_ext), function: xt.function, } .into(), diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index f3adc5682969..b71e935a918f 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -34,14 +34,14 @@ log = { workspace = true, default-features = true } sc-chain-spec = { features = [ "clap", ], workspace = true, default-features = true } -serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [dev-dependencies] +substrate-test-runtime = { workspace = true } cmd_lib = { workspace = true } docify = { workspace = true } -substrate-test-runtime = { workspace = true } [features] # `cargo build --feature=generate-readme` updates the `README.md` file. 
diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index c0ce640566b0..a8b3bdc864c9 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } +thiserror = { workspace = true } sp-core = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } -thiserror = { workspace = true } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index fe961b4690fc..670c74684467 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -41,6 +41,6 @@ sp-storage = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] +thiserror = { workspace = true } sp-test-primitives = { workspace = true } substrate-test-runtime = { workspace = true } -thiserror = { workspace = true } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index ac1891451ec0..09381ec6b553 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -20,17 +20,18 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = { workspace = true } [dependencies] -async-trait = { workspace = true } codec = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } ip_network = { workspace = true } +libp2p = { features = ["ed25519", "kad"], workspace = true } +multihash = { workspace = true } linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } -multihash = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } prost = { workspace = true } rand = { workspace = true, default-features = true } +thiserror = { workspace = true } +prometheus-endpoint = 
{ workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } @@ -40,7 +41,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } +async-trait = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } diff --git a/substrate/client/authority-discovery/src/tests.rs b/substrate/client/authority-discovery/src/tests.rs index a73515ee00d2..acfd0e61de01 100644 --- a/substrate/client/authority-discovery/src/tests.rs +++ b/substrate/client/authority-discovery/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ }; use futures::{channel::mpsc::channel, executor::LocalPool, task::LocalSpawn}; -use sc_network_types::ed25519; +use libp2p::identity::ed25519; use std::{collections::HashSet, sync::Arc}; use sc_network::{multiaddr::Protocol, Multiaddr, PeerId}; diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 6630b7157d96..9319fbe6321e 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -34,8 +34,8 @@ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt} use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; +use libp2p::kad::{PeerRecord, Record}; use linked_hash_set::LinkedHashSet; -use sc_network_types::kad::{Key, PeerRecord, Record}; use log::{debug, error, trace}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; @@ -677,15 +677,12 @@ where metrics.dht_event_received.with_label_values(&["put_record_req"]).inc(); } }, - DhtEvent::StartProvidingFailed(..) 
=> {}, - DhtEvent::ProvidersFound(..) => {}, - DhtEvent::ProvidersNotFound(..) => {}, } } async fn handle_put_record_requested( &mut self, - record_key: Key, + record_key: KademliaKey, record_value: Vec, publisher: Option, expires: Option, @@ -946,7 +943,7 @@ where authority_id, new_record.creation_time, current_record_info.creation_time, ); self.network.put_record_to( - current_record_info.record.clone().into(), + current_record_info.record.clone(), new_record.peers_with_record.clone(), // If this is empty it means we received the answer from our node local // storage, so we need to update that as well. diff --git a/substrate/client/authority-discovery/src/worker/schema/tests.rs b/substrate/client/authority-discovery/src/worker/schema/tests.rs index 1dff1b93e06d..557fa9641f97 100644 --- a/substrate/client/authority-discovery/src/worker/schema/tests.rs +++ b/substrate/client/authority-discovery/src/worker/schema/tests.rs @@ -26,9 +26,9 @@ mod schema_v2 { use super::*; use codec::Encode; +use libp2p::identity::Keypair; use prost::Message; use sc_network::{Multiaddr, PeerId}; -use sc_network_types::ed25519::Keypair; #[test] fn v2_decodes_v1() { @@ -61,7 +61,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = Keypair::generate(); + let peer_secret = Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -73,7 +73,7 @@ fn v1_decodes_v2() { let record_v2 = schema_v2::AuthorityRecord { addresses: vec_addresses.clone() }; let mut vec_record_v2 = vec![]; record_v2.encode(&mut vec_record_v2).unwrap(); - let vec_peer_public = peer_public.to_bytes().to_vec(); + let vec_peer_public = peer_public.encode_protobuf(); let peer_signature_v2 = PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v2 = SignedAuthorityRecord { @@ -97,7 +97,7 @@ fn v1_decodes_v2() { #[test] fn v1_decodes_v3() { - let peer_secret = Keypair::generate(); + 
let peer_secret = Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -112,7 +112,7 @@ fn v1_decodes_v3() { }; let mut vec_record_v3 = vec![]; record_v3.encode(&mut vec_record_v3).unwrap(); - let vec_peer_public = peer_public.to_bytes().to_vec(); + let vec_peer_public = peer_public.encode_protobuf(); let peer_signature_v3 = PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v3 = SignedAuthorityRecord { @@ -136,7 +136,7 @@ fn v1_decodes_v3() { #[test] fn v3_decodes_v2() { - let peer_secret = Keypair::generate(); + let peer_secret = Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -148,7 +148,7 @@ fn v3_decodes_v2() { let record_v2 = schema_v2::AuthorityRecord { addresses: vec_addresses.clone() }; let mut vec_record_v2 = vec![]; record_v2.encode(&mut vec_record_v2).unwrap(); - let vec_peer_public = peer_public.to_bytes().to_vec(); + let vec_peer_public = peer_public.encode_protobuf(); let peer_signature_v2 = schema_v2::PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v2 = schema_v2::SignedAuthorityRecord { diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index c14771585655..8018b5ea492d 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -30,14 +30,12 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; use prometheus_endpoint::prometheus::default_registry; + use sc_client_api::HeaderBackend; -use sc_network::{ - service::signature::{Keypair, SigningError}, - PublicKey, Signature, -}; +use sc_network::{service::signature::Keypair, Signature}; use 
sc_network_types::{ - kad::Key as KademliaKey, multiaddr::{Multiaddr, Protocol}, PeerId, }; @@ -180,8 +178,8 @@ impl NetworkSigner for TestNetwork { signature: &Vec, message: &Vec, ) -> std::result::Result { - let public_key = - PublicKey::try_decode_protobuf(&public_key).map_err(|error| error.to_string())?; + let public_key = libp2p::identity::PublicKey::try_decode_protobuf(&public_key) + .map_err(|error| error.to_string())?; let peer_id: PeerId = peer_id.into(); let remote: PeerId = public_key.to_peer_id().into(); @@ -231,18 +229,6 @@ impl NetworkDHTProvider for TestNetwork { .unbounded_send(TestNetworkEvent::StoreRecordCalled) .unwrap(); } - - fn start_providing(&self, _: KademliaKey) { - unimplemented!() - } - - fn stop_providing(&self, _: KademliaKey) { - unimplemented!() - } - - fn get_providers(&self, _: KademliaKey) { - unimplemented!() - } } impl NetworkStateInfo for TestNetwork { diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 2096af1c25bb..79e6fddae99f 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -483,7 +483,7 @@ where match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) { Ok(()) => { transaction_pushed = true; - trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); + debug!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { pending_iterator.report_invalid(&pending_tx); @@ -565,22 +565,20 @@ where if log::log_enabled!(log::Level::Info) { info!( - "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}", + "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; extrinsics_count: {}", block.header().number(), block_took.as_millis(), ::Hash::from(block.header().hash()), 
block.header().parent_hash(), - end_reason, extrinsics.len() ) - } else if log::log_enabled!(log::Level::Trace) { - trace!( - "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}", + } else if log::log_enabled!(log::Level::Debug) { + debug!( + "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; {extrinsics_summary}", block.header().number(), block_took.as_millis(), ::Hash::from(block.header().hash()), block.header().parent_hash(), - end_reason ); } @@ -910,8 +908,8 @@ mod tests { let extrinsics_num = 5; let extrinsics = std::iter::once( Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 100, nonce: 0, } @@ -1016,7 +1014,7 @@ mod tests { }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(Sr25519Keyring::numeric(who)) + .signer(AccountKeyring::numeric(who)) .build() }; @@ -1082,13 +1080,13 @@ mod tests { let tiny = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)) - .signer(Sr25519Keyring::numeric(who)) + .signer(AccountKeyring::numeric(who)) .nonce(1) .build() }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(Sr25519Keyring::numeric(who)) + .signer(AccountKeyring::numeric(who)) .build() }; diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index 13c75fd08c3c..adea7a3571dd 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -26,7 +26,7 @@ //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{ -//! # runtime::Transfer, Sr25519Keyring, +//! # runtime::Transfer, AccountKeyring, //! # DefaultTestClientBuilderExt, TestClientBuilderExt, //! # }; //! 
# use sc_transaction_pool::{BasicPool, FullChainApi}; diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index c61a5a7ad3c1..08392e18227f 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -23,9 +23,9 @@ sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } [dev-dependencies] sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index f63ff6c64447..2e885240936f 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,31 +16,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { workspace = true, default-features = true } clap = { features = ["derive"], optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -docify = { workspace = true } -log = { workspace = true } memmap2 = { workspace = true } -sc-chain-spec-derive = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-chain-spec-derive = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } +sp-io = { workspace = true } sc-network = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } 
-serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } -sp-io = { workspace = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +log = { workspace = true } sp-tracing = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +docify = { workspace = true } [dev-dependencies] -regex = { workspace = true } +substrate-test-runtime = { workspace = true } +sp-keyring = { workspace = true, default-features = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } -sp-keyring = { workspace = true, default-features = true } -substrate-test-runtime = { workspace = true } +regex = { workspace = true } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index fa161f1202ab..aa3c1ba3e6f1 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -782,7 +782,7 @@ mod tests { use serde_json::{from_str, json, Value}; use sp_application_crypto::Ss58Codec; use sp_core::storage::well_known_keys; - use sp_keyring::Sr25519Keyring; + use sp_keyring::AccountKeyring; type TestSpec = ChainSpec; @@ -924,8 +924,8 @@ mod tests { }, "substrateTest": { "authorities": [ - Sr25519Keyring::Ferdie.public().to_ss58check(), - Sr25519Keyring::Alice.public().to_ss58check() + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() ], } })) @@ -980,8 +980,8 @@ mod tests { }, "substrateTest": { "authorities": [ - 
Sr25519Keyring::Ferdie.public().to_ss58check(), - Sr25519Keyring::Alice.public().to_ss58check() + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() ], } })) @@ -1083,8 +1083,8 @@ mod tests { "invalid_pallet": {}, "substrateTest": { "authorities": [ - Sr25519Keyring::Ferdie.public().to_ss58check(), - Sr25519Keyring::Alice.public().to_ss58check() + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() ], } })) diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index d7b4489b6cc5..f0b9f8f9b905 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { workspace = true, default-features = true } chrono = { workspace = true } clap = { features = ["derive", "string", "wrap_help"], workspace = true } -codec = { workspace = true, default-features = true } fdlimit = { workspace = true } futures = { workspace = true } itertools = { workspace = true } libp2p-identity = { features = ["ed25519", "peerid"], workspace = true } log = { workspace = true, default-features = true } names = { workspace = true } +codec = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } regex = { workspace = true } rpassword = { workspace = true } @@ -34,6 +34,7 @@ serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] } +tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } sc-keystore = { workspace = true, default-features = true } @@ -51,12 +52,11 @@ sp-keystore = { workspace = true, 
default-features = true } sp-panic-handler = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, default-features = true } [dev-dependencies] +tempfile = { workspace = true } futures-timer = { workspace = true } sp-tracing = { workspace = true, default-features = true } -tempfile = { workspace = true } [features] default = ["rocksdb"] diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index e4b8b9644feb..add7cb4f8505 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -78,13 +78,21 @@ pub struct ImportParams { /// Specify the state cache size. /// /// Providing `0` will disable the cache. - #[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)] + #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, + + /// DEPRECATED: switch to `--trie-cache-size`. + #[arg(long)] + state_cache_size: Option, } impl ImportParams { /// Specify the trie cache maximum size. pub fn trie_cache_maximum_size(&self) -> Option { + if self.state_cache_size.is_some() { + eprintln!("`--state-cache-size` was deprecated. Please switch to `--trie-cache-size`."); + } + if self.trie_cache_size == 0 { None } else { diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index e0c52deb44ca..465372fba17d 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -33,12 +33,10 @@ pub struct SharedParams { /// Specify the development chain. /// - /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` - /// flags, unless explicitly overridden. 
It also disables local peer discovery (see `--no-mdns` - /// and `--discover-local`). With this flag some nodes might start with manual seal, producing - /// blocks at certain events (e.g. `polkadot-omni-node`, which produces blocks at certain - /// intervals dictated by `--dev-block-time`). - #[arg(long)] + /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, + /// `--alice`, and `--tmp` flags, unless explicitly overridden. + /// It also disables local peer discovery (see --no-mdns and --discover-local) + #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, /// Specify custom base path. @@ -111,8 +109,12 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None if is_dev => "dev".into(), - _ => "".into(), + None => + if is_dev { + "dev".into() + } else { + "".into() + }, } } diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 6af673617118..98e8ad676be3 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -20,6 +20,7 @@ async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } @@ -37,10 +38,10 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } sc-keystore = { workspace = true, default-features 
= true } sc-network = { workspace = true, default-features = true } sc-network-test = { workspace = true } @@ -48,5 +49,4 @@ sp-keyring = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tempfile = { workspace = true } tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index 305409b80c78..af55e72a9b7e 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -19,13 +19,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } -fork-tree = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } num-bigint = { workspace = true } num-rational = { workspace = true } num-traits = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +thiserror = { workspace = true } +fork-tree = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -45,12 +46,11 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] sc-block-builder = { workspace = true, default-features = true } -sc-network-test = { workspace = true } sp-keyring = { workspace = true, default-features = true } +sc-network-test = { workspace = 
true } sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 3e3834189938..ce5b1baec0b5 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -16,12 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = { workspace = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +futures = { workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } @@ -30,13 +31,12 @@ sp-consensus-babe = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] +serde_json = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } 
substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index bfe7e2c3d5dc..900a44b95e04 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -20,6 +20,8 @@ fnv = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +thiserror = { workspace = true } +wasm-timer = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -38,20 +40,18 @@ sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } tokio = { workspace = true, default-features = true } -wasm-timer = { workspace = true } [dev-dependencies] +serde = { workspace = true, default-features = true } +tempfile = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network-test = { workspace = true } -serde = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-mmr-primitives = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tempfile = { workspace = true } [features] # This feature adds BLS crypto primitives. 
It should not be used in production since diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index f8f24250ad93..e1956dacf396 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -17,17 +17,17 @@ futures = { workspace = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -sc-consensus-beefy = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +thiserror = { workspace = true } +sc-consensus-beefy = { workspace = true, default-features = true } sp-consensus-beefy = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } [dev-dependencies] -sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 1b0f799f81bc..77cd50ad784b 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -21,18 +21,18 @@ futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, 
default-features = true } mockall = { workspace = true } parking_lot = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] sp-test-primitives = { workspace = true } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index f361fac54af7..65ba39d34c21 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -20,48 +20,48 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = { workspace = true } array-bytes = { workspace = true, default-features = true } async-trait = { workspace = true } -codec = { features = ["derive"], workspace = true, default-features = true } dyn-clone = { workspace = true } finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } -fork-tree = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -prometheus-endpoint = { 
workspace = true, default-features = true } rand = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -sc-network-common = { workspace = true, default-features = true } sc-network-gossip = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = 
true } [dev-dependencies] assert_matches = { workspace = true } finality-grandpa = { features = ["derive-codec", "test-helpers"], workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-test = { workspace = true } -serde = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index 1fb8bd9367c4..86513ac5df15 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -13,25 +13,25 @@ homepage.workspace = true workspace = true [dependencies] -codec = { features = ["derive"], workspace = true, default-features = true } finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } futures = { workspace = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { 
workspace = true } [dev-dependencies] sc-block-builder = { workspace = true, default-features = true } sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index ada3a45e186e..a79581b1e9f1 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -174,20 +174,10 @@ impl WarpSyncProof { let header = blockchain.header(latest_justification.target().1)? .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); - let proof = WarpSyncFragment { header, justification: latest_justification }; - - // Check for the limit. We remove some bytes from the maximum size, because we're - // only counting the size of the `WarpSyncFragment`s. The extra margin is here - // to leave room for rest of the data (the size of the `Vec` and the boolean). 
- if proofs_encoded_len + proof.encoded_size() >= MAX_WARP_SYNC_PROOF_SIZE - 50 { - false - } else { - proofs.push(proof); - true - } - } else { - true + proofs.push(WarpSyncFragment { header, justification: latest_justification }) } + + true }; let final_outcome = WarpSyncProof { proofs, is_finished }; diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 4d232f7256cb..49111434015a 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,13 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -31,7 +33,6 @@ sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } @@ -43,10 +44,9 @@ sp-inherents = { workspace = true, 
default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } sc-basic-authorship = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } substrate-test-runtime-transaction-pool = { workspace = true } -tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index af9bcc8d56d6..39f8f8609d8d 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -353,7 +353,7 @@ mod tests { use sp_inherents::InherentData; use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, Sr25519Keyring::*, TestClientBuilder, TestClientBuilderExt, + AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index a051bf3f4779..bc89deb0b50d 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -22,6 +22,7 @@ futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -33,4 +34,3 @@ sp-consensus-pow = { 
workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index 7e02558e007c..5725155579fc 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -39,15 +39,15 @@ sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = { workspace = true, default-features = true } criterion = { workspace = true, default-features = true } -kitchensink-runtime = { workspace = true } kvdb-rocksdb = { workspace = true } -quickcheck = { workspace = true } rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +quickcheck = { workspace = true } +kitchensink-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tempfile = { workspace = true } +array-bytes = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 092101945107..aaa1398a13bc 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1180,7 +1180,7 @@ impl Backend { /// The second argument is the Column that stores the State. /// /// Should only be needed for benchmarking. - #[cfg(feature = "runtime-benchmarks")] + #[cfg(any(feature = "runtime-benchmarks"))] pub fn expose_db(&self) -> (Arc>, sp_database::ColumnId) { (self.storage.db.clone(), columns::STATE) } @@ -1188,7 +1188,7 @@ impl Backend { /// Expose the Storage that is used by this backend. /// /// Should only be needed for benchmarking. 
- #[cfg(feature = "runtime-benchmarks")] + #[cfg(any(feature = "runtime-benchmarks"))] pub fn expose_storage(&self) -> Arc>> { self.storage.clone() } @@ -1486,7 +1486,6 @@ impl Backend { .map(|(n, _)| n) .unwrap_or(Zero::zero()); let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some(); - let existing_body = pending_block.body.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1678,23 +1677,6 @@ impl Backend { children, ); } - } - - let should_check_block_gap = !existing_header || !existing_body; - - if should_check_block_gap { - let insert_new_gap = - |transaction: &mut Transaction, - new_gap: BlockGap>, - block_gap: &mut Option>>| { - transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode()); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); - block_gap.replace(new_gap); - }; if let Some(mut gap) = block_gap { match gap.gap_type { @@ -1713,65 +1695,43 @@ impl Backend { block_gap = None; debug!(target: "db", "Removed block gap."); } else { - insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap = Some(gap); debug!(target: "db", "Update block gap. {block_gap:?}"); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP, + &gap.encode(), + ); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); } block_gap_updated = true; }, BlockGapType::MissingBody => { - // Gap increased when syncing the header chain during fast sync. - if number == gap.end + One::one() && !existing_body { - gap.end += One::one(); - utils::insert_number_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - insert_new_gap(&mut transaction, gap, &mut block_gap); - debug!(target: "db", "Update block gap. {block_gap:?}"); - block_gap_updated = true; - // Gap decreased when downloading the full blocks. 
- } else if number == gap.start && existing_body { - gap.start += One::one(); - if gap.start > gap.end { - transaction.remove(columns::META, meta_keys::BLOCK_GAP); - transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); - block_gap = None; - debug!(target: "db", "Removed block gap."); - } else { - insert_new_gap(&mut transaction, gap, &mut block_gap); - debug!(target: "db", "Update block gap. {block_gap:?}"); - } - block_gap_updated = true; - } + unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") }, } - } else if operation.create_gap { - if number > best_num + One::one() && - self.blockchain.header(parent_hash)?.is_none() - { - let gap = BlockGap { - start: best_num + One::one(), - end: number - One::one(), - gap_type: BlockGapType::MissingHeaderAndBody, - }; - insert_new_gap(&mut transaction, gap, &mut block_gap); - block_gap_updated = true; - debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}"); - } else if number == best_num + One::one() && - self.blockchain.header(parent_hash)?.is_some() && - !existing_body - { - let gap = BlockGap { - start: number, - end: number, - gap_type: BlockGapType::MissingBody, - }; - insert_new_gap(&mut transaction, gap, &mut block_gap); - block_gap_updated = true; - debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}"); - } + } else if operation.create_gap && + number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() + { + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; + transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + block_gap = Some(gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap {block_gap:?}"); } } diff --git a/substrate/client/executor/Cargo.toml 
b/substrate/client/executor/Cargo.toml index 5cb4936e7534..ca78afd47068 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -38,21 +38,21 @@ sp-wasm-interface = { workspace = true, default-features = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } -criterion = { workspace = true, default-features = true } -num_cpus = { workspace = true } -paste = { workspace = true, default-features = true } -regex = { workspace = true } +wat = { workspace = true } sc-runtime-test = { workspace = true } -sc-tracing = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -substrate-test-runtime = { workspace = true } -tempfile = { workspace = true } tracing-subscriber = { workspace = true } -wat = { workspace = true } +paste = { workspace = true, default-features = true } +regex = { workspace = true } +criterion = { workspace = true, default-features = true } +num_cpus = { workspace = true } +tempfile = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index aaf13a8ae768..58fb0b423f24 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -17,12 +17,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -polkavm = { workspace = true } +thiserror 
= { workspace = true } +wasm-instrument = { workspace = true, default-features = true } sc-allocator = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } -thiserror = { workspace = true } -wasm-instrument = { workspace = true, default-features = true } +polkavm = { workspace = true } [features] default = [] diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index a94c1d493134..9d489eaae420 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -150,8 +150,8 @@ pub enum WasmError { Other(String), } -impl From for WasmError { - fn from(error: polkavm::program::ProgramParseError) -> Self { +impl From for WasmError { + fn from(error: polkavm::ProgramParseError) -> Self { WasmError::Other(error.to_string()) } } diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index e3f4b4ad9774..d689083b2f85 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -17,7 +17,6 @@ // along with this program. If not, see . 
use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; -use polkavm::ArcBytes; use wasm_instrument::parity_wasm::elements::{ deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, Module, Section, @@ -30,7 +29,7 @@ pub struct RuntimeBlob(BlobKind); #[derive(Clone)] enum BlobKind { WebAssembly(Module), - PolkaVM((polkavm::ProgramBlob, ArcBytes)), + PolkaVM(polkavm::ProgramBlob<'static>), } impl RuntimeBlob { @@ -53,9 +52,9 @@ impl RuntimeBlob { pub fn new(raw_blob: &[u8]) -> Result { if raw_blob.starts_with(b"PVM\0") { if crate::is_polkavm_enabled() { - let raw = ArcBytes::from(raw_blob); - let blob = polkavm::ProgramBlob::parse(raw.clone())?; - return Ok(Self(BlobKind::PolkaVM((blob, raw)))); + return Ok(Self(BlobKind::PolkaVM( + polkavm::ProgramBlob::parse(raw_blob)?.into_owned(), + ))); } else { return Err(WasmError::Other("expected a WASM runtime blob, found a PolkaVM runtime blob; set the 'SUBSTRATE_ENABLE_POLKAVM' environment variable to enable the experimental PolkaVM-based executor".to_string())); } @@ -193,7 +192,7 @@ impl RuntimeBlob { match self.0 { BlobKind::WebAssembly(raw_module) => serialize(raw_module).expect("serializing into a vec should succeed; qed"), - BlobKind::PolkaVM(ref blob) => blob.1.to_vec(), + BlobKind::PolkaVM(ref blob) => blob.as_bytes().to_vec(), } } @@ -228,7 +227,7 @@ impl RuntimeBlob { pub fn as_polkavm_blob(&self) -> Option<&polkavm::ProgramBlob> { match self.0 { BlobKind::WebAssembly(..) => None, - BlobKind::PolkaVM((ref blob, _)) => Some(blob), + BlobKind::PolkaVM(ref blob) => Some(blob), } } } diff --git a/substrate/client/executor/polkavm/src/lib.rs b/substrate/client/executor/polkavm/src/lib.rs index 134f9ea3d8c4..1bd72eb33d30 100644 --- a/substrate/client/executor/polkavm/src/lib.rs +++ b/substrate/client/executor/polkavm/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use polkavm::{CallError, Caller, Reg}; +use polkavm::{Caller, Reg}; use sc_executor_common::{ error::{Error, WasmError}, wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, @@ -26,10 +26,10 @@ use sp_wasm_interface::{ }; #[repr(transparent)] -pub struct InstancePre(polkavm::InstancePre<(), String>); +pub struct InstancePre(polkavm::InstancePre<()>); #[repr(transparent)] -pub struct Instance(polkavm::Instance<(), String>); +pub struct Instance(polkavm::Instance<()>); impl WasmModule for InstancePre { fn new_instance(&self) -> Result, Error> { @@ -43,13 +43,11 @@ impl WasmInstance for Instance { name: &str, raw_data: &[u8], ) -> (Result, Error>, Option) { - let pc = match self.0.module().exports().find(|e| e.symbol() == name) { - Some(export) => export.program_counter(), - None => - return ( - Err(format!("cannot call into the runtime: export not found: '{name}'").into()), - None, - ), + let Some(method_index) = self.0.module().lookup_export(name) else { + return ( + Err(format!("cannot call into the runtime: export not found: '{name}'").into()), + None, + ); }; let Ok(raw_data_length) = u32::try_from(raw_data.len()) else { @@ -60,60 +58,56 @@ impl WasmInstance for Instance { }; // TODO: This will leak guest memory; find a better solution. + let mut state_args = polkavm::StateArgs::new(); - // Make sure that the memory is cleared... - if let Err(err) = self.0.reset_memory() { - return ( - Err(format!( - "call into the runtime method '{name}' failed: reset memory failed: {err}" - ) - .into()), - None, - ); - } + // Make sure the memory is cleared... + state_args.reset_memory(true); + // ...and allocate space for the input payload. + state_args.sbrk(raw_data_length); - // ... and allocate space for the input payload. 
- if let Err(err) = self.0.sbrk(raw_data_length) { - return ( - Err(format!( - "call into the runtime method '{name}' failed: reset memory failed: {err}" - ) - .into()), - None, - ); + match self.0.update_state(state_args) { + Ok(()) => {}, + Err(polkavm::ExecutionError::Trap(trap)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {trap}").into()), None); + }, + Err(polkavm::ExecutionError::Error(error)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {error}").into()), None); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), } // Grab the address of where the guest's heap starts; that's where we've just allocated // the memory for the input payload. let data_pointer = self.0.module().memory_map().heap_base(); - if let Err(err) = self.0.write_memory(data_pointer, raw_data) { - return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {err}").into()), None); + if let Err(error) = self.0.write_memory(data_pointer, raw_data) { + return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {error}").into()), None); } - match self.0.call_typed(&mut (), pc, (data_pointer, raw_data_length)) { + let mut state = (); + let mut call_args = polkavm::CallArgs::new(&mut state, method_index); + call_args.args_untyped(&[data_pointer, raw_data_length]); + + match self.0.call(Default::default(), call_args) { Ok(()) => {}, - Err(CallError::Trap) => + Err(polkavm::ExecutionError::Trap(trap)) => { return ( - Err(format!("call into the runtime method '{name}' failed: trap").into()), + Err(format!("call into the runtime method '{name}' failed: {trap}").into()), None, - ), - Err(CallError::Error(err)) => - return ( - Err(format!("call into the runtime method '{name}' failed: {err}").into()), - None, - ), - 
Err(CallError::User(err)) => + ); + }, + Err(polkavm::ExecutionError::Error(error)) => { return ( - Err(format!("call into the runtime method '{name}' failed: {err}").into()), + Err(format!("call into the runtime method '{name}' failed: {error}").into()), None, - ), - Err(CallError::NotEnoughGas) => unreachable!("gas metering is never enabled"), - }; + ); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + } - let result_pointer = self.0.reg(Reg::A0); - let result_length = self.0.reg(Reg::A1); - let output = match self.0.read_memory(result_pointer as u32, result_length as u32) { + let result_pointer = self.0.get_reg(Reg::A0); + let result_length = self.0.get_reg(Reg::A1); + let output = match self.0.read_memory_into_vec(result_pointer, result_length) { Ok(output) => output, Err(error) => { return (Err(format!("call into the runtime method '{name}' failed: failed to read the return payload: {error}").into()), None) @@ -133,31 +127,20 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { self.0 - .instance - .read_memory_into(u32::from(address), dest) + .read_memory_into_slice(u32::from(address), dest) .map_err(|error| error.to_string()) .map(|_| ()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.0 - .instance - .write_memory(u32::from(address), data) - .map_err(|error| error.to_string()) + self.0.write_memory(u32::from(address), data).map_err(|error| error.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - let pointer = match self.0.instance.sbrk(0) { - Ok(pointer) => pointer.expect("fetching the current heap pointer never fails"), - Err(err) => return Err(format!("sbrk failed: {err}")), - }; + let pointer = self.0.sbrk(0).expect("fetching the current heap pointer never fails"); // TODO: This will leak guest memory; find a better solution. 
- match self.0.instance.sbrk(size) { - Ok(Some(_)) => (), - Ok(None) => return Err(String::from("allocation error")), - Err(err) => return Err(format!("sbrk failed: {err}")), - } + self.0.sbrk(size).ok_or_else(|| String::from("allocation failed"))?; Ok(Pointer::new(pointer)) } @@ -172,46 +155,41 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { } } -fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Result<(), String> { +fn call_host_function( + caller: &mut Caller<()>, + function: &dyn Function, +) -> Result<(), polkavm::Trap> { let mut args = [Value::I64(0); Reg::ARG_REGS.len()]; let mut nth_reg = 0; for (nth_arg, kind) in function.signature().args.iter().enumerate() { match kind { ValueType::I32 => { - args[nth_arg] = Value::I32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i32); + args[nth_arg] = Value::I32(caller.get_reg(Reg::ARG_REGS[nth_reg]) as i32); nth_reg += 1; }, ValueType::F32 => { - args[nth_arg] = Value::F32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as u32); + args[nth_arg] = Value::F32(caller.get_reg(Reg::ARG_REGS[nth_reg])); + nth_reg += 1; + }, + ValueType::I64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = + Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); + }, + ValueType::F64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); }, - ValueType::I64 => - if caller.instance.is_64_bit() { - args[nth_arg] = Value::I64(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i64); - nth_reg += 1; - } else { - let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = - 
Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); - }, - ValueType::F64 => - if caller.instance.is_64_bit() { - args[nth_arg] = Value::F64(caller.instance.reg(Reg::ARG_REGS[nth_reg])); - nth_reg += 1; - } else { - let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); - }, } } @@ -226,33 +204,27 @@ fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Resul { Ok(value) => value, Err(error) => { - let name = function.name(); - return Err(format!("call into the host function '{name}' failed: {error}")) + log::warn!("Call into the host function '{}' failed: {error}", function.name()); + return Err(polkavm::Trap::default()); }, }; if let Some(value) = value { match value { Value::I32(value) => { - caller.instance.set_reg(Reg::A0, value as u64); + caller.set_reg(Reg::A0, value as u32); }, Value::F32(value) => { - caller.instance.set_reg(Reg::A0, value as u64); + caller.set_reg(Reg::A0, value); + }, + Value::I64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); + }, + Value::F64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); }, - Value::I64(value) => - if caller.instance.is_64_bit() { - caller.instance.set_reg(Reg::A0, value as u64); - } else { - caller.instance.set_reg(Reg::A0, value as u64); - caller.instance.set_reg(Reg::A1, (value >> 32) as u64); - }, - Value::F64(value) => - if caller.instance.is_64_bit() { - caller.instance.set_reg(Reg::A0, value as u64); - } else { - caller.instance.set_reg(Reg::A0, value as u64); - caller.instance.set_reg(Reg::A1, (value >> 32) as u64); - }, } } @@ -278,16 +250,12 @@ where }, }; - let module = - polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob.clone())?; - - let mut linker = 
polkavm::Linker::new(); - + let module = polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob)?; + let mut linker = polkavm::Linker::new(&engine); for function in H::host_functions() { - linker.define_untyped(function.name(), |mut caller: Caller<()>| { - call_host_function(&mut caller, function) - })?; + linker.func_new(function.name(), |mut caller| call_host_function(&mut caller, function))?; } + let instance_pre = linker.instantiate_pre(&module)?; Ok(Box::new(InstancePre(instance_pre))) } diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index 7ea94568e1b7..ef8e5da876aa 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -16,18 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +log = { workspace = true, default-features = true } cfg-if = { workspace = true } libc = { workspace = true } -log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! 
-anyhow = { workspace = true } -sc-allocator = { workspace = true, default-features = true } -sc-executor-common = { workspace = true, default-features = true } -sp-runtime-interface = { workspace = true, default-features = true } -sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } wasmtime = { features = [ "cache", "cranelift", @@ -35,6 +30,11 @@ wasmtime = { features = [ "parallel-compilation", "pooling-allocator", ], workspace = true } +anyhow = { workspace = true } +sc-allocator = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } # Here we include the rustix crate in the exactly same semver-compatible version as used by # wasmtime and enable its 'use-libc' flag. @@ -45,10 +45,10 @@ wasmtime = { features = [ rustix = { features = ["fs", "mm", "param", "std", "use-libc"], workspace = true } [dev-dependencies] -cargo_metadata = { workspace = true } -codec = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } +wat = { workspace = true } sc-runtime-test = { workspace = true } sp-io = { workspace = true, default-features = true } tempfile = { workspace = true } -wat = { workspace = true } +paste = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +cargo_metadata = { workspace = true } diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index 209964e02ef3..87a4be320d68 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -21,8 +21,8 @@ futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-network 
= { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index e46fafbc3729..d338bb1af61a 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -20,10 +20,10 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -thiserror = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 7849eac5f516..6639a10d33f1 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -17,14 +17,14 @@ workspace = true codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-offchain = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sp-consensus-beefy = { workspace = true, default-features = true } 
+sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-mmr-primitives = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index ea52913aea16..94bc9a671f84 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -21,18 +21,18 @@ ahash = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } +schnellru = { workspace = true } +tracing = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } -schnellru = { workspace = true } sp-runtime = { workspace = true, default-features = true } -tracing = { workspace = true, default-features = true } [dev-dependencies] +tokio = { workspace = true, default-features = true } async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } quickcheck = { workspace = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index bff258a9a011..a4bd922a76d5 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -220,16 +220,18 @@ impl Future for GossipEngine { }, NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
- } => - if let Some(role) = this.network.peer_role(peer, handshake) { - this.state_machine.new_peer( - &mut this.notification_service, - peer, - role, - ); - } else { + } => { + let Some(role) = this.network.peer_role(peer, handshake) else { log::debug!(target: "gossip", "role for {peer} couldn't be determined"); - }, + continue + }; + + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + }, NotificationEvent::NotificationStreamClosed { peer } => { this.state_machine .peer_disconnected(&mut this.notification_service, peer); @@ -254,12 +256,10 @@ impl Future for GossipEngine { match sync_event_stream { Poll::Ready(Some(event)) => match event { - SyncEvent::InitialPeers(peer_ids) => - this.network.add_set_reserved(peer_ids, this.protocol.clone()), - SyncEvent::PeerConnected(peer_id) => - this.network.add_set_reserved(vec![peer_id], this.protocol.clone()), - SyncEvent::PeerDisconnected(peer_id) => - this.network.remove_set_reserved(peer_id, this.protocol.clone()), + SyncEvent::PeerConnected(remote) => + this.network.add_set_reserved(remote, this.protocol.clone()), + SyncEvent::PeerDisconnected(remote) => + this.network.remove_set_reserved(remote, this.protocol.clone()), }, // The sync event stream closed. Do the same for [`GossipValidator`]. Poll::Ready(None) => { diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index 2ec573bf9e3e..20d9922200c2 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -82,18 +82,15 @@ mod validator; /// Abstraction over a network. 
pub trait Network: NetworkPeers + NetworkEventStream { - fn add_set_reserved(&self, peer_ids: Vec, protocol: ProtocolName) { - let addrs = peer_ids - .into_iter() - .map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into()))) - .collect(); - let result = self.add_peers_to_reserved_set(protocol, addrs); + fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { + let addr = Multiaddr::empty().with(Protocol::P2p(*who.as_ref())); + let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } - fn remove_set_reserved(&self, peer_id: PeerId, protocol: ProtocolName) { - let result = self.remove_peers_from_reserved_set(protocol, iter::once(peer_id).collect()); + fn remove_set_reserved(&self, who: PeerId, protocol: ProtocolName) { + let result = self.remove_peers_from_reserved_set(protocol, iter::once(who).collect()); if let Err(err) = result { log::error!(target: "gossip", "remove_set_reserved failed: {}", err); } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 19af70867658..c8fd28e08109 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -34,54 +34,54 @@ futures-timer = { workspace = true } ip_network = { workspace = true } libp2p = { features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"], workspace = true } linked_hash_set = { workspace = true } -litep2p = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } -once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } partial_sort = { workspace = true } pin-project = { workspace = true } +rand = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { 
workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +thiserror = { workspace = true } +tokio = { features = ["macros", "sync"], workspace = true, default-features = true } +tokio-stream = { workspace = true } +unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } +zeroize = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } prost = { workspace = true } -rand = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -schnellru = { workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { features = ["macros", "sync"], workspace = true, default-features = true } -tokio-stream = { workspace = true } -unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } -void = { workspace = true } wasm-timer = { workspace = true } -zeroize = { workspace = true, default-features = true } +litep2p = { workspace = true } +once_cell = { workspace = true } +void = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } mockall = { workspace = true } multistream-select = { workspace = true } rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features 
= ["macros", "rt-multi-thread"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } +tokio-test = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } sp-test-primitives = { workspace = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } -tempfile = { workspace = true } -tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } -tokio-test = { workspace = true } -tokio-util = { features = ["compat"], workspace = true } criterion = { workspace = true, default-features = true, features = ["async_tokio"] } sc-consensus = { workspace = true, default-features = true } diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs index 40a810d616b5..7d32c9faeba1 100644 --- a/substrate/client/network/benches/notifications_protocol.rs +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -22,48 +22,51 @@ use criterion::{ }; use sc_network::{ config::{ - FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonReservedPeerMode, - NotificationHandshake, Params, ProtocolId, Role, SetConfig, + FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, + NonReservedPeerMode, NotificationHandshake, Params, ProtocolId, Role, SetConfig, }, - service::traits::{NetworkService, NotificationEvent}, - Litep2pNetworkBackend, NetworkBackend, NetworkWorker, NotificationMetrics, NotificationService, - PeerId, Roles, + 
service::traits::NotificationEvent, + NetworkWorker, NotificationMetrics, NotificationService, Roles, +}; +use sc_network_common::sync::message::BlockAnnouncesHandshake; +use sc_network_types::build_multiaddr; +use sp_runtime::traits::Zero; +use std::{ + net::{IpAddr, Ipv4Addr, TcpListener}, + str::FromStr, }; -use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sp_core::H256; -use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; -use tokio::{sync::Mutex, task::JoinHandle}; -const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of notifications, label) - (6, 100, "64B"), - (9, 100, "512B"), - (12, 100, "4KB"), - (15, 100, "64KB"), -]; -const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of notifications, label) - (18, 10, "256KB"), - (21, 10, "2MB"), - (24, 10, "16MB"), - (27, 10, "128MB"), -]; const MAX_SIZE: u64 = 2u64.pow(30); +const SAMPLE_SIZE: usize = 50; +const NOTIFICATIONS: usize = 50; +const EXPONENTS: &[(u32, &'static str)] = &[ + (6, "64B"), + (9, "512B"), + (12, "4KB"), + (15, "64KB"), + (18, "256KB"), + (21, "2MB"), + (24, "16MB"), + (27, "128MB"), +]; + +// TODO: It's be better to bind system-provided port when initializing the worker +fn get_listen_address() -> sc_network::Multiaddr { + let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); + let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port + let local_addr = listener.local_addr().unwrap(); + let port = local_addr.port(); + + build_multiaddr!(Ip4(ip), Tcp(port)) +} -fn create_network_worker( -) -> (N, Arc, Arc>>) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ +pub fn create_network_worker( + listen_addr: sc_network::Multiaddr, +) -> (NetworkWorker, Box) { let role = Role::Full; - let net_conf = NetworkConfiguration::new_local(); - let network_config = 
FullNetworkConfiguration::::new(&net_conf, None); let genesis_hash = runtime::Hash::zero(); - let (block_announce_config, notification_service) = N::notification_config( + let (block_announce_config, notification_service) = NonDefaultSetConfig::new( "/block-announces/1".into(), vec!["/bench-notifications-protocol/block-announces/1".into()], MAX_SIZE, @@ -79,17 +82,21 @@ where reserved_nodes: vec![], non_reserved_mode: NonReservedPeerMode::Accept, }, - NotificationMetrics::new(None), - network_config.peer_store_handle(), ); - let worker = N::new(Params:: { + let mut net_conf = NetworkConfiguration::new_local(); + net_conf.listen_addresses = vec![listen_addr]; + let worker = NetworkWorker::::new(Params::< + runtime::Block, + runtime::Hash, + NetworkWorker<_, _>, + > { block_announce_config, role, executor: Box::new(|f| { tokio::spawn(f); }), genesis_hash, - network_config, + network_config: FullNetworkConfiguration::new(&net_conf, None), protocol_id: ProtocolId::from("bench-protocol-name"), fork_id: None, metrics_registry: None, @@ -97,122 +104,80 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); - let network_service = worker.network_service(); - let notification_service = Arc::new(Mutex::new(notification_service)); - - (worker, network_service, notification_service) -} -struct BenchSetup { - notification_service1: Arc>>, - notification_service2: Arc>>, - peer_id2: PeerId, - handle1: JoinHandle<()>, - handle2: JoinHandle<()>, -} - -impl Drop for BenchSetup { - fn drop(&mut self) { - self.handle1.abort(); - self.handle2.abort(); - } + (worker, notification_service) } -fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let _guard = rt.enter(); +async fn run_serially(size: usize, limit: usize) { + let listen_address1 = get_listen_address(); + let listen_address2 = get_listen_address(); + let (worker1, mut notification_service1) = create_network_worker(listen_address1); + 
let (worker2, mut notification_service2) = create_network_worker(listen_address2.clone()); + let peer_id2: sc_network::PeerId = (*worker2.local_peer_id()).into(); - let (worker1, network_service1, notification_service1) = create_network_worker::(); - let (worker2, network_service2, notification_service2) = create_network_worker::(); - let peer_id2: sc_network::PeerId = network_service2.local_peer_id().into(); - let handle1 = tokio::spawn(worker1.run()); - let handle2 = tokio::spawn(worker2.run()); + worker1 + .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) + .unwrap(); - let ready = tokio::spawn({ - let notification_service1 = Arc::clone(¬ification_service1); - let notification_service2 = Arc::clone(¬ification_service2); + let network1_run = worker1.run(); + let network2_run = worker2.run(); + let (tx, rx) = async_channel::bounded(10); - async move { - let listen_address2 = { - while network_service2.listen_addresses().is_empty() { - tokio::time::sleep(Duration::from_millis(10)).await; - } - network_service2.listen_addresses()[0].clone() - }; - network_service1 - .add_reserved_peer(MultiaddrWithPeerId { - multiaddr: listen_address2, - peer_id: peer_id2, - }) - .unwrap(); + let network1 = tokio::spawn(async move { + tokio::pin!(network1_run); + loop { + tokio::select! { + _ = &mut network1_run => {}, + event = notification_service1.next_event() => { + match event { + Some(NotificationEvent::NotificationStreamOpened { .. 
}) => { + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + }, + event => panic!("Unexpected event {:?}", event), + }; + }, + message = rx.recv() => { + match message { + Ok(Some(_)) => { + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + }, + Ok(None) => break, + Err(err) => panic!("Unexpected error {:?}", err), - let mut notification_service1 = notification_service1.lock().await; - let mut notification_service2 = notification_service2.lock().await; - loop { - tokio::select! { - Some(event) = notification_service1.next_event() => { - if let NotificationEvent::NotificationStreamOpened { .. } = event { - break; - } - }, - Some(event) = notification_service2.next_event() => { - if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - } - }, + } } } } }); - - tokio::task::block_in_place(|| { - let _ = tokio::runtime::Handle::current().block_on(ready); - }); - - Arc::new(BenchSetup { - notification_service1, - notification_service2, - peer_id2, - handle1, - handle2, - }) -} - -async fn run_serially(setup: Arc, size: usize, limit: usize) { - let (tx, rx) = async_channel::bounded(1); - let _ = tx.send(Some(())).await; - let network1 = tokio::spawn({ - let notification_service1 = Arc::clone(&setup.notification_service1); - let peer_id2 = setup.peer_id2; - async move { - let mut notification_service1 = notification_service1.lock().await; - while let Ok(message) = rx.recv().await { - let Some(_) = message else { break }; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - } - } - }); - let network2 = tokio::spawn({ - let notification_service2 = Arc::clone(&setup.notification_service2); - async move { - let mut notification_service2 = notification_service2.lock().await; - let mut received_counter = 0; - while let Some(event) 
= notification_service2.next_event().await { - if let NotificationEvent::NotificationReceived { .. } = event { - received_counter += 1; - if received_counter >= limit { - let _ = tx.send(None).await; - break; - } - let _ = tx.send(Some(())).await; - } + let network2 = tokio::spawn(async move { + let mut received_counter = 0; + tokio::pin!(network2_run); + loop { + tokio::select! { + _ = &mut network2_run => {}, + event = notification_service2.next_event() => { + match event { + Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + }, + Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, + Some(NotificationEvent::NotificationReceived { .. }) => { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(None).await; + break + } + let _ = tx.send(Some(())).await; + }, + event => panic!("Unexpected event {:?}", event), + }; + }, } } }); @@ -220,34 +185,70 @@ async fn run_serially(setup: Arc, size: usize, limit: usize) { let _ = tokio::join!(network1, network2); } -async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { - let (tx, rx) = async_channel::bounded(1); - let network1 = tokio::spawn({ - let setup = Arc::clone(&setup); - async move { - let mut notification_service1 = setup.notification_service1.lock().await; - for _ in 0..limit { - notification_service1 - .send_async_notification(&setup.peer_id2, vec![0; size]) - .await - .unwrap(); +async fn run_with_backpressure(size: usize, limit: usize) { + let listen_address1 = get_listen_address(); + let listen_address2 = get_listen_address(); + let (worker1, mut notification_service1) = create_network_worker(listen_address1); + let (worker2, mut notification_service2) = create_network_worker(listen_address2.clone()); + let peer_id2: sc_network::PeerId = (*worker2.local_peer_id()).into(); + + worker1 + .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, 
peer_id: peer_id2 }) + .unwrap(); + + let network1_run = worker1.run(); + let network2_run = worker2.run(); + + let network1 = tokio::spawn(async move { + let mut sent_counter = 0; + tokio::pin!(network1_run); + loop { + tokio::select! { + _ = &mut network1_run => {}, + event = notification_service1.next_event() => { + match event { + Some(NotificationEvent::NotificationStreamOpened { .. }) => { + while sent_counter < limit { + sent_counter += 1; + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + } + }, + Some(NotificationEvent::NotificationStreamClosed { .. }) => { + if sent_counter != limit { panic!("Stream closed unexpectedly") } + break + }, + event => panic!("Unexpected event {:?}", event), + }; + }, } - let _ = rx.recv().await; } }); - let network2 = tokio::spawn({ - let setup = Arc::clone(&setup); - async move { - let mut notification_service2 = setup.notification_service2.lock().await; - let mut received_counter = 0; - while let Some(event) = notification_service2.next_event().await { - if let NotificationEvent::NotificationReceived { .. } = event { - received_counter += 1; - if received_counter >= limit { - let _ = tx.send(()).await; - break; - } - } + let network2 = tokio::spawn(async move { + let mut received_counter = 0; + tokio::pin!(network2_run); + loop { + tokio::select! { + _ = &mut network2_run => {}, + event = notification_service2.next_event() => { + match event { + Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + }, + Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, + Some(NotificationEvent::NotificationStreamClosed { .. }) => { + if received_counter != limit { panic!("Stream closed unexpectedly") } + break + }, + Some(NotificationEvent::NotificationReceived { .. 
}) => { + received_counter += 1; + if received_counter >= limit { break } + }, + event => panic!("Unexpected event {:?}", event), + }; + }, } } }); @@ -255,64 +256,35 @@ async fn run_with_backpressure(setup: Arc, size: usize, limit: usize let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { +fn run_benchmark(c: &mut Criterion) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group(group); + let mut group = c.benchmark_group("notifications_benchmark"); group.plot_config(plot_config); - let libp2p_setup = setup_workers::>(&rt); - for &(exponent, limit, label) in payload.iter() { + for &(exponent, label) in EXPONENTS.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + group.throughput(Throughput::Bytes(NOTIFICATIONS as u64 * size as u64)); group.bench_with_input( - BenchmarkId::new("libp2p/serially", label), - &(size, limit), + BenchmarkId::new("consistently", label), + &(size, NOTIFICATIONS), |b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); + b.to_async(&rt).iter(|| run_serially(size, limit)); }, ); group.bench_with_input( - BenchmarkId::new("libp2p/with_backpressure", label), - &(size, limit), + BenchmarkId::new("with_backpressure", label), + &(size, NOTIFICATIONS), |b, &(size, limit)| { - b.to_async(&rt) - .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); + b.to_async(&rt).iter(|| run_with_backpressure(size, limit)); }, ); } - drop(libp2p_setup); - - let litep2p_setup = setup_workers::(&rt); - for &(exponent, limit, label) in payload.iter() { - let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, limit), - 
|b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); - }, - ); - group.bench_with_input( - BenchmarkId::new("litep2p/with_backpressure", label), - &(size, limit), - |b, &(size, limit)| { - b.to_async(&rt) - .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); - }, - ); - } - drop(litep2p_setup); } -fn run_benchmark_with_small_payload(c: &mut Criterion) { - run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); +criterion_group! { + name = benches; + config = Criterion::default().sample_size(SAMPLE_SIZE); + targets = run_benchmark } - -fn run_benchmark_with_large_payload(c: &mut Criterion) { - run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); -} - -criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs index 85381112b753..09bf829f5a7e 100644 --- a/substrate/client/network/benches/request_response_protocol.rs +++ b/substrate/client/network/benches/request_response_protocol.rs @@ -22,70 +22,76 @@ use criterion::{ }; use sc_network::{ config::{ - FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonReservedPeerMode, - NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, SetConfig, + FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonDefaultSetConfig, + NonReservedPeerMode, NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, + SetConfig, }, - service::traits::NetworkService, - IfDisconnected, Litep2pNetworkBackend, NetworkBackend, NetworkRequest, NetworkWorker, - NotificationMetrics, NotificationService, PeerId, Roles, + IfDisconnected, NetworkBackend, NetworkRequest, NetworkWorker, NotificationMetrics, + NotificationService, Roles, +}; +use 
sc_network_common::sync::message::BlockAnnouncesHandshake; +use sc_network_types::build_multiaddr; +use sp_runtime::traits::Zero; +use std::{ + net::{IpAddr, Ipv4Addr, TcpListener}, + str::FromStr, + time::Duration, }; -use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sp_core::H256; -use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; -use tokio::{sync::Mutex, task::JoinHandle}; const MAX_SIZE: u64 = 2u64.pow(30); -const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of requests, label) - (6, 100, "64B"), - (9, 100, "512B"), - (12, 100, "4KB"), - (15, 100, "64KB"), -]; -const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ - // (Exponent of size, number of requests, label) - (18, 10, "256KB"), - (21, 10, "2MB"), - (24, 10, "16MB"), - (27, 10, "128MB"), +const SAMPLE_SIZE: usize = 50; +const REQUESTS: usize = 50; +const EXPONENTS: &[(u32, &'static str)] = &[ + (6, "64B"), + (9, "512B"), + (12, "4KB"), + (15, "64KB"), + (18, "256KB"), + (21, "2MB"), + (24, "16MB"), + (27, "128MB"), ]; -pub fn create_network_worker() -> ( - N, - Arc, +fn get_listen_address() -> sc_network::Multiaddr { + let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); + let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port + let local_addr = listener.local_addr().unwrap(); + let port = local_addr.port(); + + build_multiaddr!(Ip4(ip), Tcp(port)) +} + +pub fn create_network_worker( + listen_addr: sc_network::Multiaddr, +) -> ( + NetworkWorker, async_channel::Receiver, - Arc>>, -) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ + Box, +) { let (tx, rx) = async_channel::bounded(10); - let request_response_config = N::request_response_config( - "/request-response/1".into(), - vec![], - MAX_SIZE, - MAX_SIZE, - Duration::from_secs(2), - Some(tx), - ); - let role = Role::Full; - let net_conf = 
NetworkConfiguration::new_local(); + let request_response_config = + NetworkWorker::::request_response_config( + "/request-response/1".into(), + vec![], + MAX_SIZE, + MAX_SIZE, + Duration::from_secs(2), + Some(tx), + ); + let mut net_conf = NetworkConfiguration::new_local(); + net_conf.listen_addresses = vec![listen_addr]; let mut network_config = FullNetworkConfiguration::new(&net_conf, None); network_config.add_request_response_protocol(request_response_config); - let genesis_hash = runtime::Hash::zero(); - let (block_announce_config, notification_service) = N::notification_config( + let (block_announce_config, notification_service) = NonDefaultSetConfig::new( "/block-announces/1".into(), vec![], 1024, Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( Roles::from(&Role::Full), Zero::zero(), - genesis_hash, - genesis_hash, + runtime::Hash::zero(), + runtime::Hash::zero(), ))), SetConfig { in_peers: 1, @@ -93,12 +99,14 @@ where reserved_nodes: vec![], non_reserved_mode: NonReservedPeerMode::Accept, }, - NotificationMetrics::new(None), - network_config.peer_store_handle(), ); - let worker = N::new(Params:: { + let worker = NetworkWorker::::new(Params::< + runtime::Block, + runtime::Hash, + NetworkWorker<_, _>, + > { block_announce_config, - role, + role: Role::Full, executor: Box::new(|f| { tokio::spawn(f); }), @@ -111,115 +119,65 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); - let notification_service = Arc::new(Mutex::new(notification_service)); - let network_service = worker.network_service(); - (worker, network_service, rx, notification_service) + (worker, rx, notification_service) } -struct BenchSetup { - #[allow(dead_code)] - notification_service1: Arc>>, - #[allow(dead_code)] - notification_service2: Arc>>, - network_service1: Arc, - peer_id2: PeerId, - handle1: JoinHandle<()>, - handle2: JoinHandle<()>, - #[allow(dead_code)] - rx1: async_channel::Receiver, - rx2: async_channel::Receiver, -} - -impl Drop for 
BenchSetup { - fn drop(&mut self) { - self.handle1.abort(); - self.handle2.abort(); - } -} - -fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let _guard = rt.enter(); +async fn run_serially(size: usize, limit: usize) { + let listen_address1 = get_listen_address(); + let listen_address2 = get_listen_address(); + let (mut worker1, _rx1, _notification_service1) = create_network_worker(listen_address1); + let service1 = worker1.service().clone(); + let (worker2, rx2, _notification_service2) = create_network_worker(listen_address2.clone()); + let peer_id2 = *worker2.local_peer_id(); - let (worker1, network_service1, rx1, notification_service1) = - create_network_worker::(); - let (worker2, network_service2, rx2, notification_service2) = - create_network_worker::(); - let peer_id2 = worker2.network_service().local_peer_id(); - let handle1 = tokio::spawn(worker1.run()); - let handle2 = tokio::spawn(worker2.run()); + worker1.add_known_address(peer_id2, listen_address2.into()); - let ready = tokio::spawn({ - let network_service1 = Arc::clone(&network_service1); - - async move { - let listen_address2 = { - while network_service2.listen_addresses().is_empty() { - tokio::time::sleep(Duration::from_millis(10)).await; - } - network_service2.listen_addresses()[0].clone() - }; - network_service1.add_known_address(peer_id2, listen_address2.into()); + let network1_run = worker1.run(); + let network2_run = worker2.run(); + let (break_tx, break_rx) = async_channel::bounded(10); + let requests = async move { + let mut sent_counter = 0; + while sent_counter < limit { + let _ = service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); + sent_counter += 1; } - }); - - tokio::task::block_in_place(|| { - let _ = tokio::runtime::Handle::current().block_on(ready); - }); - - Arc::new(BenchSetup { - notification_service1, - 
notification_service2, - network_service1, - peer_id2, - handle1, - handle2, - rx1, - rx2, - }) -} + let _ = break_tx.send(()).await; + }; -async fn run_serially(setup: Arc, size: usize, limit: usize) { - let (break_tx, break_rx) = async_channel::bounded(1); - let network1 = tokio::spawn({ - let network_service1 = Arc::clone(&setup.network_service1); - let peer_id2 = setup.peer_id2; - async move { - for _ in 0..limit { - let _ = network_service1 - .request( - peer_id2.into(), - "/request-response/1".into(), - vec![0; 2], - None, - IfDisconnected::TryConnect, - ) - .await - .unwrap(); + let network1 = tokio::spawn(async move { + tokio::pin!(requests); + tokio::pin!(network1_run); + loop { + tokio::select! { + _ = &mut network1_run => {}, + _ = &mut requests => break, } - let _ = break_tx.send(()).await; } }); - let network2 = tokio::spawn({ - let rx2 = setup.rx2.clone(); - async move { - loop { - tokio::select! { - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, - } + let network2 = tokio::spawn(async move { + tokio::pin!(network2_run); + loop { + tokio::select! { + _ = &mut network2_run => {}, + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, } } }); @@ -230,12 +188,23 @@ async fn run_serially(setup: Arc, size: usize, limit: usize) { // The libp2p request-response implementation does not provide any backpressure feedback. // So this benchmark is useless until we implement it for litep2p. 
#[allow(dead_code)] -async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { - let (break_tx, break_rx) = async_channel::bounded(1); +async fn run_with_backpressure(size: usize, limit: usize) { + let listen_address1 = get_listen_address(); + let listen_address2 = get_listen_address(); + let (mut worker1, _rx1, _notification_service1) = create_network_worker(listen_address1); + let service1 = worker1.service().clone(); + let (worker2, rx2, _notification_service2) = create_network_worker(listen_address2.clone()); + let peer_id2 = *worker2.local_peer_id(); + + worker1.add_known_address(peer_id2, listen_address2.into()); + + let network1_run = worker1.run(); + let network2_run = worker2.run(); + let (break_tx, break_rx) = async_channel::bounded(10); let requests = futures::future::join_all((0..limit).into_iter().map(|_| { let (tx, rx) = futures::channel::oneshot::channel(); - setup.network_service1.start_request( - setup.peer_id2.into(), + service1.start_request( + peer_id2.into(), "/request-response/1".into(), vec![0; 8], None, @@ -246,72 +215,64 @@ async fn run_with_backpressure(setup: Arc, size: usize, limit: usize })); let network1 = tokio::spawn(async move { - let responses = requests.await; - for res in responses { - res.unwrap().unwrap(); + tokio::pin!(requests); + tokio::pin!(network1_run); + loop { + tokio::select! { + _ = &mut network1_run => {}, + responses = &mut requests => { + for res in responses { + res.unwrap().unwrap(); + } + let _ = break_tx.send(()).await; + break; + }, + } } - let _ = break_tx.send(()).await; }); let network2 = tokio::spawn(async move { - for _ in 0..limit { - let IncomingRequest { pending_response, .. } = setup.rx2.recv().await.unwrap(); - pending_response - .send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }) - .unwrap(); + tokio::pin!(network2_run); + loop { + tokio::select! 
{ + _ = &mut network2_run => {}, + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, + } } - break_rx.recv().await }); let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { +fn run_benchmark(c: &mut Criterion) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group(group); + let mut group = c.benchmark_group("request_response_benchmark"); group.plot_config(plot_config); - let libp2p_setup = setup_workers::>(&rt); - for &(exponent, limit, label) in payload.iter() { + for &(exponent, label) in EXPONENTS.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + group.throughput(Throughput::Bytes(REQUESTS as u64 * size as u64)); group.bench_with_input( - BenchmarkId::new("libp2p/serially", label), - &(size, limit), + BenchmarkId::new("consistently", label), + &(size, REQUESTS), |b, &(size, limit)| { - b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); + b.to_async(&rt).iter(|| run_serially(size, limit)); }, ); } - drop(libp2p_setup); - - // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService - let litep2p_setup = setup_workers::(&rt); - // for &(exponent, limit, label) in payload.iter() { - // let size = 2usize.pow(exponent); - // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); - // group.bench_with_input( - // BenchmarkId::new("litep2p/serially", label), - // &(size, limit), - // |b, &(size, limit)| { - // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); - // }, - // ); - // } - drop(litep2p_setup); -} - -fn 
run_benchmark_with_small_payload(c: &mut Criterion) { - run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); } -fn run_benchmark_with_large_payload(c: &mut Criterion) { - run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); +criterion_group! { + name = benches; + config = Criterion::default().sample_size(SAMPLE_SIZE); + targets = run_benchmark } - -criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index fad7ae425858..34ba4f061c44 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -19,18 +19,18 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = { workspace = true } [dependencies] -array-bytes = { workspace = true, default-features = true } async-channel = { workspace = true } +array-bytes = { workspace = true, default-features = true } codec = { features = [ "derive", ], workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } prost = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index e2a91e961668..5ecbec52d507 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -68,7 +68,6 @@ pub struct 
Behaviour { } /// Event generated by `Behaviour`. -#[derive(Debug)] pub enum BehaviourOut { /// Started a random iterative Kademlia discovery query. RandomKademliaStarted, @@ -311,22 +310,6 @@ impl Behaviour { ) { self.discovery.store_record(record_key, record_value, publisher, expires); } - - /// Start providing `key` on the DHT. - pub fn start_providing(&mut self, key: RecordKey) { - self.discovery.start_providing(key) - } - - /// Stop providing `key` on the DHT. - pub fn stop_providing(&mut self, key: &RecordKey) { - self.discovery.stop_providing(key) - } - - /// Start searching for providers on the DHT. Will later produce either a `ProvidersFound` - /// or `ProvidersNotFound` event. - pub fn get_providers(&mut self, key: RecordKey) { - self.discovery.get_providers(key) - } } impl From for BehaviourOut { @@ -392,29 +375,18 @@ impl From for BehaviourOut { }, DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), DiscoveryOut::ValueFound(results, duration) => - BehaviourOut::Dht(DhtEvent::ValueFound(results.into()), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValueFound(results), Some(duration)), DiscoveryOut::ValueNotFound(key, duration) => - BehaviourOut::Dht(DhtEvent::ValueNotFound(key.into()), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValueNotFound(key), Some(duration)), DiscoveryOut::ValuePut(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePut(key.into()), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValuePut(key), Some(duration)), DiscoveryOut::PutRecordRequest(record_key, record_value, publisher, expires) => BehaviourOut::Dht( - DhtEvent::PutRecordRequest(record_key.into(), record_value, publisher, expires), + DhtEvent::PutRecordRequest(record_key, record_value, publisher, expires), None, ), DiscoveryOut::ValuePutFailed(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePutFailed(key.into()), Some(duration)), - DiscoveryOut::StartProvidingFailed(key) => - BehaviourOut::Dht(DhtEvent::StartProvidingFailed(key.into()), None), 
- DiscoveryOut::ProvidersFound(key, providers, duration) => BehaviourOut::Dht( - DhtEvent::ProvidersFound( - key.into(), - providers.into_iter().map(Into::into).collect(), - ), - Some(duration), - ), - DiscoveryOut::ProvidersNotFound(key, duration) => - BehaviourOut::Dht(DhtEvent::ProvidersNotFound(key.into()), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), Some(duration)), DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 917449cf228c..8080bda9a574 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -53,13 +53,13 @@ use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::{ - core::{transport::PortUse, Endpoint, Multiaddr}, + core::{Endpoint, Multiaddr}, kad::{ self, - store::{MemoryStore, RecordStore}, + record::store::{MemoryStore, RecordStore}, Behaviour as Kademlia, BucketInserts, Config as KademliaConfig, Event as KademliaEvent, - Event, GetClosestPeersError, GetProvidersError, GetProvidersOk, GetRecordOk, PeerRecord, - QueryId, QueryResult, Quorum, Record, RecordKey, + GetClosestPeersError, GetRecordOk, PeerRecord, QueryId, QueryResult, Quorum, Record, + RecordKey, }, mdns::{self, tokio::Behaviour as TokioMdns}, multiaddr::Protocol, @@ -68,8 +68,8 @@ use libp2p::{ toggle::{Toggle, ToggleConnectionHandler}, DialFailure, ExternalAddrConfirmed, FromSwarm, }, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, StreamProtocol, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, + StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -214,14 +214,23 @@ impl DiscoveryConfig { enable_mdns, kademlia_disjoint_query_paths, kademlia_protocol, - kademlia_legacy_protocol: _, + kademlia_legacy_protocol, 
kademlia_replication_factor, } = self; let kademlia = if let Some(ref kademlia_protocol) = kademlia_protocol { - let mut config = KademliaConfig::new(kademlia_protocol.clone()); + let mut config = KademliaConfig::default(); config.set_replication_factor(kademlia_replication_factor); + // Populate kad with both the legacy and the new protocol names. + // Remove the legacy protocol: + // https://github.com/paritytech/polkadot-sdk/issues/504 + let kademlia_protocols = if let Some(legacy_protocol) = kademlia_legacy_protocol { + vec![kademlia_protocol.clone(), legacy_protocol] + } else { + vec![kademlia_protocol.clone()] + }; + config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); config.set_record_filtering(libp2p::kad::StoreInserts::FilterBoth); @@ -457,31 +466,6 @@ impl DiscoveryBehaviour { } } } - - /// Register as a content provider on the DHT for `key`. - pub fn start_providing(&mut self, key: RecordKey) { - if let Some(kad) = self.kademlia.as_mut() { - if let Err(e) = kad.start_providing(key.clone()) { - warn!(target: "sub-libp2p", "Libp2p => Failed to start providing {key:?}: {e}."); - self.pending_events.push_back(DiscoveryOut::StartProvidingFailed(key)); - } - } - } - - /// Deregister as a content provider on the DHT for `key`. - pub fn stop_providing(&mut self, key: &RecordKey) { - if let Some(kad) = self.kademlia.as_mut() { - kad.stop_providing(key); - } - } - - /// Get content providers for `key` from the DHT. - pub fn get_providers(&mut self, key: RecordKey) { - if let Some(kad) = self.kademlia.as_mut() { - kad.get_providers(key); - } - } - /// Store a record in the Kademlia record store. pub fn store_record( &mut self, @@ -597,15 +581,6 @@ pub enum DiscoveryOut { /// Returning the corresponding key as well as the request duration. ValuePutFailed(RecordKey, Duration), - /// Starting providing a key failed. - StartProvidingFailed(RecordKey), - - /// The DHT yielded results for the providers request. 
- ProvidersFound(RecordKey, HashSet, Duration), - - /// Providers for the requested key were not found in the DHT. - ProvidersNotFound(RecordKey, Duration), - /// Started a random Kademlia query. /// /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. @@ -638,14 +613,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - port_use: PortUse, ) -> Result, ConnectionDenied> { self.kademlia.handle_established_outbound_connection( connection_id, peer, addr, role_override, - port_use, ) } @@ -717,7 +690,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { Ok(list.into_iter().collect()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(e) => { self.num_connections += 1; @@ -804,10 +777,6 @@ impl NetworkBehaviour for DiscoveryBehaviour { self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); }, - event => { - debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); - self.kademlia.on_swarm_event(event); - }, } } @@ -820,7 +789,11 @@ impl NetworkBehaviour for DiscoveryBehaviour { self.kademlia.on_connection_handler_event(peer_id, connection_id, event); } - fn poll(&mut self, cx: &mut Context) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll>> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) @@ -863,7 +836,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - while let Poll::Ready(ev) = self.kademlia.poll(cx) { + while let Poll::Ready(ev) = self.kademlia.poll(cx, params) { match ev { ToSwarm::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. 
} => { @@ -1009,56 +982,6 @@ impl NetworkBehaviour for DiscoveryBehaviour { }; return Poll::Ready(ToSwarm::GenerateEvent(ev)) }, - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetProviders(res), - stats, - id, - .. - } => { - let ev = match res { - Ok(GetProvidersOk::FoundProviders { key, providers }) => { - debug!( - target: "sub-libp2p", - "Libp2p => Found providers {:?} for key {:?}, id {:?}, stats {:?}", - providers, - key, - id, - stats, - ); - - DiscoveryOut::ProvidersFound( - key, - providers, - stats.duration().unwrap_or_default(), - ) - }, - Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { - closest_peers: _, - }) => { - debug!( - target: "sub-libp2p", - "Libp2p => Finished with no additional providers {:?}, stats {:?}, took {:?} ms", - id, - stats, - stats.duration().map(|val| val.as_millis()) - ); - - continue - }, - Err(GetProvidersError::Timeout { key, closest_peers: _ }) => { - debug!( - target: "sub-libp2p", - "Libp2p => Failed to get providers for {key:?} due to timeout.", - ); - - DiscoveryOut::ProvidersNotFound( - key, - stats.duration().unwrap_or_default(), - ) - }, - }; - return Poll::Ready(ToSwarm::GenerateEvent(ev)) - }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::PutRecord(res), stats, @@ -1096,38 +1019,30 @@ impl NetworkBehaviour for DiscoveryBehaviour { e.key(), e, ), }, - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::Bootstrap(res), - .. - } => match res { - Ok(ok) => debug!( - target: "sub-libp2p", - "Libp2p => DHT bootstrap progressed: {ok:?}", - ), - Err(e) => warn!( - target: "sub-libp2p", - "Libp2p => DHT bootstrap error: {e:?}", - ), - }, // We never start any other type of query. KademliaEvent::OutboundQueryProgressed { result: e, .. 
} => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, - Event::ModeChanged { new_mode } => { - debug!(target: "sub-libp2p", "Libp2p => Kademlia mode changed: {new_mode}") - }, }, ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }), - event => { - return Poll::Ready(event.map_out(|_| { - unreachable!("`GenerateEvent` is handled in a branch above; qed") - })); - }, + ToSwarm::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), + ToSwarm::CloseConnection { peer_id, connection } => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } // Poll mDNS. - while let Poll::Ready(ev) = self.mdns.poll(cx) { + while let Poll::Ready(ev) = self.mdns.poll(cx, params) { match ev { ToSwarm::GenerateEvent(event) => match event { mdns::Event::Discovered(list) => { @@ -1149,17 +1064,17 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, // `event` is an enum with no variant ToSwarm::NotifyHandler { event, .. 
} => match event {}, - event => { - return Poll::Ready( - event - .map_in(|_| { - unreachable!("`NotifyHandler` is handled in a branch above; qed") - }) - .map_out(|_| { - unreachable!("`GenerateEvent` is handled in a branch above; qed") - }), - ); - }, + ToSwarm::CloseConnection { peer_id, connection } => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -1202,14 +1117,21 @@ mod tests { }, identity::Keypair, noise, - swarm::{Swarm, SwarmEvent}, + swarm::{Executor, Swarm, SwarmEvent}, yamux, Multiaddr, }; use sp_core::hash::H256; - use std::{collections::HashSet, task::Poll, time::Duration}; + use std::{collections::HashSet, pin::Pin, task::Poll}; - #[tokio::test] - async fn discovery_working() { + struct TokioExecutor(tokio::runtime::Runtime); + impl Executor for TokioExecutor { + fn exec(&self, f: Pin + Send>>) { + let _ = self.0.spawn(f); + } + } + + #[test] + fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; let genesis_hash = H256::from_low_u64_be(1); @@ -1220,40 +1142,42 @@ mod tests { // the first swarm via `with_permanent_addresses`. 
let mut swarms = (0..25) .map(|i| { - let mut swarm = libp2p::SwarmBuilder::with_new_identity() - .with_tokio() - .with_other_transport(|keypair| { - MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed() - }) - .unwrap() - .with_behaviour(|keypair| { - let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); - config - .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) - .allow_private_ip(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .with_kademlia(genesis_hash, fork_id, &protocol_id); - - config.finish() - }) - .unwrap() - .with_swarm_config(|config| { - // This is taken care of by notification protocols in non-test environment - config.with_idle_connection_timeout(Duration::from_secs(10)) - }) - .build(); + let keypair = Keypair::generate_ed25519(); + + let transport = MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .boxed(); + + let behaviour = { + let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); + config + .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) + .allow_private_ip(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .with_kademlia(genesis_hash, fork_id, &protocol_id); + + config.finish() + }; + + let runtime = tokio::runtime::Runtime::new().unwrap(); + #[allow(deprecated)] + let mut swarm = libp2p::swarm::SwarmBuilder::with_executor( + transport, + behaviour, + keypair.public().to_peer_id(), + TokioExecutor(runtime), + ) + .build(); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); if i == 0 { first_swarm_peer_id_and_addr = - Some((*swarm.local_peer_id(), listen_addr.clone())) + Some((keypair.public().to_peer_id(), listen_addr.clone())) } swarm.listen_on(listen_addr.clone()).unwrap(); @@ -1340,7 +1264,7 @@ mod tests 
{ } }); - fut.await + futures::executor::block_on(fut); } #[test] diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index e8ec1eee2545..5400d11cb6ac 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -22,13 +22,13 @@ use crate::types::ProtocolName; use bytes::Bytes; - -use sc_network_common::role::ObservedRole; -use sc_network_types::{ - kad::{Key, PeerRecord}, +use libp2p::{ + kad::{record::Key, PeerRecord}, PeerId, }; +use sc_network_common::role::ObservedRole; + /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] #[must_use] @@ -45,17 +45,8 @@ pub enum DhtEvent { /// An error has occurred while putting a record into the DHT. ValuePutFailed(Key), - /// An error occured while registering as a content provider on the DHT. - StartProvidingFailed(Key), - /// The DHT received a put record request. PutRecordRequest(Key, Vec, Option, Option), - - /// The providers for [`Key`] were found. - ProvidersFound(Key, Vec), - - /// The providers for [`Key`] were not found. - ProvidersNotFound(Key), } /// Type for events generated by networking layer. 
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 2bea2e5a80dc..9043f9420e8d 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -27,12 +27,13 @@ use array_bytes::bytes2hex; use futures::{FutureExt, Stream}; use futures_timer::Delay; use ip_network::IpNetwork; +use libp2p::kad::record::Key as KademliaKey; use litep2p::{ protocol::{ libp2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ - Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, ContentProvider, + Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, }, @@ -44,7 +45,6 @@ use litep2p::{ PeerId, ProtocolName, }; use parking_lot::RwLock; -use sc_network_types::kad::Key as KademliaKey; use schnellru::{ByLength, LruMap}; use std::{ @@ -144,14 +144,6 @@ pub enum DiscoveryEvent { query_id: QueryId, }, - /// Providers were successfully retrieved. - GetProvidersSuccess { - /// Query ID. - query_id: QueryId, - /// Found providers sorted by distance to provided key. - providers: Vec, - }, - /// Query failed. QueryFailed { /// Query ID. @@ -415,21 +407,6 @@ impl Discovery { .await; } - /// Start providing `key`. - pub async fn start_providing(&mut self, key: KademliaKey) { - self.kademlia_handle.start_providing(key.into()).await; - } - - /// Stop providing `key`. - pub async fn stop_providing(&mut self, key: KademliaKey) { - self.kademlia_handle.stop_providing(key.into()).await; - } - - /// Get providers for `key`. - pub async fn get_providers(&mut self, key: KademliaKey) -> QueryId { - self.kademlia_handle.get_providers(key.into()).await - } - /// Check if the observed address is a known address. 
fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool { let mut known = known.iter(); @@ -604,22 +581,8 @@ impl Stream for Discovery { return Poll::Ready(Some(DiscoveryEvent::IncomingRecord { record })) }, - Poll::Ready(Some(KademliaEvent::GetProvidersSuccess { - provided_key, - providers, - query_id, - })) => { - log::trace!( - target: LOG_TARGET, - "`GET_PROVIDERS` for {query_id:?} with {provided_key:?} yielded {providers:?}", - ); - - return Poll::Ready(Some(DiscoveryEvent::GetProvidersSuccess { - query_id, - providers, - })) - }, - // We do not validate incoming providers. + // Content provider events are ignored for now. + Poll::Ready(Some(KademliaEvent::GetProvidersSuccess { .. })) | Poll::Ready(Some(KademliaEvent::IncomingProvider { .. })) => {}, } diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 52b2970525df..87b992423674 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -50,6 +50,7 @@ use crate::{ use codec::Encode; use futures::StreamExt; +use libp2p::kad::{PeerRecord, Record as P2PRecord, RecordKey}; use litep2p::{ config::ConfigBuilder, crypto::ed25519::Keypair, @@ -73,7 +74,6 @@ use litep2p::{ Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, }; use prometheus_endpoint::Registry; -use sc_network_types::kad::{Key as RecordKey, PeerRecord, Record as P2PRecord}; use sc_client_api::BlockBackend; use sc_network_common::{role::Roles, ExHashT}; @@ -143,17 +143,6 @@ struct ConnectionContext { num_connections: usize, } -/// Kademlia query we are tracking. -#[derive(Debug)] -enum KadQuery { - /// `GET_VALUE` query for key and when it was initiated. - GetValue(RecordKey, Instant), - /// `PUT_VALUE` query for key and when it was initiated. - PutValue(RecordKey, Instant), - /// `GET_PROVIDERS` query for key and when it was initiated. - GetProviders(RecordKey, Instant), -} - /// Networking backend for `litep2p`. 
pub struct Litep2pNetworkBackend { /// Main `litep2p` object. @@ -168,8 +157,11 @@ pub struct Litep2pNetworkBackend { /// `Peerset` handles to notification protocols. peerset_handles: HashMap, - /// Pending Kademlia queries. - pending_queries: HashMap, + /// Pending `GET_VALUE` queries. + pending_get_values: HashMap, + + /// Pending `PUT_VALUE` queries. + pending_put_values: HashMap, /// Discovery. discovery: Discovery, @@ -623,7 +615,8 @@ impl NetworkBackend for Litep2pNetworkBac peerset_handles: notif_protocols, num_connected, discovery, - pending_queries: HashMap::new(), + pending_put_values: HashMap::new(), + pending_get_values: HashMap::new(), peerstore_handle: peer_store_handle, block_announce_protocol, event_streams: out_events::OutChannels::new(None)?, @@ -711,30 +704,21 @@ impl NetworkBackend for Litep2pNetworkBac Some(command) => match command { NetworkServiceCommand::GetValue{ key } => { let query_id = self.discovery.get_value(key.clone()).await; - self.pending_queries.insert(query_id, KadQuery::GetValue(key, Instant::now())); + self.pending_get_values.insert(query_id, (key, Instant::now())); } NetworkServiceCommand::PutValue { key, value } => { let query_id = self.discovery.put_value(key.clone(), value).await; - self.pending_queries.insert(query_id, KadQuery::PutValue(key, Instant::now())); + self.pending_put_values.insert(query_id, (key, Instant::now())); } NetworkServiceCommand::PutValueTo { record, peers, update_local_storage} => { - let kademlia_key = record.key.clone(); - let query_id = self.discovery.put_value_to_peers(record.into(), peers, update_local_storage).await; - self.pending_queries.insert(query_id, KadQuery::PutValue(kademlia_key, Instant::now())); + let kademlia_key = record.key.to_vec().into(); + let query_id = self.discovery.put_value_to_peers(record, peers, update_local_storage).await; + self.pending_put_values.insert(query_id, (kademlia_key, Instant::now())); } + NetworkServiceCommand::StoreRecord { key, value, publisher, expires } 
=> { self.discovery.store_record(key, value, publisher.map(Into::into), expires).await; } - NetworkServiceCommand::StartProviding { key } => { - self.discovery.start_providing(key).await; - } - NetworkServiceCommand::StopProviding { key } => { - self.discovery.stop_providing(key).await; - } - NetworkServiceCommand::GetProviders { key } => { - let query_id = self.discovery.get_providers(key.clone()).await; - self.pending_queries.insert(query_id, KadQuery::GetProviders(key, Instant::now())); - } NetworkServiceCommand::EventStream { tx } => { self.event_streams.push(tx); } @@ -769,7 +753,7 @@ impl NetworkBackend for Litep2pNetworkBac } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { - log::debug!( + log::warn!( target: LOG_TARGET, "couldn't add known address ({address}) for {peer:?}, unsupported transport" ); @@ -837,8 +821,12 @@ impl NetworkBackend for Litep2pNetworkBac } } Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { - match self.pending_queries.remove(&query_id) { - Some(KadQuery::GetValue(key, started)) => { + match self.pending_get_values.remove(&query_id) { + None => log::warn!( + target: LOG_TARGET, + "`GET_VALUE` succeeded for a non-existent query", + ), + Some((key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) succeeded", @@ -848,7 +836,7 @@ impl NetworkBackend for Litep2pNetworkBac self.event_streams.send( Event::Dht( DhtEvent::ValueFound( - record.into() + record ) ) ); @@ -860,26 +848,23 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get"]) .observe(started.elapsed().as_secs_f64()); } - }, - query => { - log::error!( - target: LOG_TARGET, - "Missing/invalid pending query for `GET_VALUE`: {query:?}" - ); - debug_assert!(false); - }, + } } } Some(DiscoveryEvent::PutRecordSuccess { query_id }) => { - match self.pending_queries.remove(&query_id) { - Some(KadQuery::PutValue(key, started)) => { + match 
self.pending_put_values.remove(&query_id) { + None => log::warn!( + target: LOG_TARGET, + "`PUT_VALUE` succeeded for a non-existent query", + ), + Some((key, started)) => { log::trace!( target: LOG_TARGET, "`PUT_VALUE` for {key:?} ({query_id:?}) succeeded", ); self.event_streams.send(Event::Dht( - DhtEvent::ValuePut(key) + DhtEvent::ValuePut(libp2p::kad::RecordKey::new(&key)) )); if let Some(ref metrics) = self.metrics { @@ -888,57 +873,42 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-put"]) .observe(started.elapsed().as_secs_f64()); } - }, - query => { - log::error!( - target: LOG_TARGET, - "Missing/invalid pending query for `PUT_VALUE`: {query:?}" - ); - debug_assert!(false); } } } - Some(DiscoveryEvent::GetProvidersSuccess { query_id, providers }) => { - match self.pending_queries.remove(&query_id) { - Some(KadQuery::GetProviders(key, started)) => { - log::trace!( + Some(DiscoveryEvent::QueryFailed { query_id }) => { + match self.pending_get_values.remove(&query_id) { + None => match self.pending_put_values.remove(&query_id) { + None => log::warn!( target: LOG_TARGET, - "`GET_PROVIDERS` for {key:?} ({query_id:?}) succeeded", - ); + "non-existent query failed ({query_id:?})", + ), + Some((key, started)) => { + log::debug!( + target: LOG_TARGET, + "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", + ); - self.event_streams.send(Event::Dht( - DhtEvent::ProvidersFound( - key.into(), - providers.into_iter().map(|p| p.peer.into()).collect() - ) - )); + self.event_streams.send(Event::Dht( + DhtEvent::ValuePutFailed(libp2p::kad::RecordKey::new(&key)) + )); - if let Some(ref metrics) = self.metrics { - metrics - .kademlia_query_duration - .with_label_values(&["providers-get"]) - .observe(started.elapsed().as_secs_f64()); + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["value-put-failed"]) + .observe(started.elapsed().as_secs_f64()); + } } - }, - query => { - log::error!( - target: 
LOG_TARGET, - "Missing/invalid pending query for `GET_PROVIDERS`: {query:?}" - ); - debug_assert!(false); } - } - } - Some(DiscoveryEvent::QueryFailed { query_id }) => { - match self.pending_queries.remove(&query_id) { - Some(KadQuery::GetValue(key, started)) => { + Some((key, started)) => { log::debug!( target: LOG_TARGET, "`GET_VALUE` ({query_id:?}) failed for key {key:?}", ); self.event_streams.send(Event::Dht( - DhtEvent::ValueNotFound(key) + DhtEvent::ValueNotFound(libp2p::kad::RecordKey::new(&key)) )); if let Some(ref metrics) = self.metrics { @@ -947,46 +917,6 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get-failed"]) .observe(started.elapsed().as_secs_f64()); } - }, - Some(KadQuery::PutValue(key, started)) => { - log::debug!( - target: LOG_TARGET, - "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", - ); - - self.event_streams.send(Event::Dht( - DhtEvent::ValuePutFailed(key) - )); - - if let Some(ref metrics) = self.metrics { - metrics - .kademlia_query_duration - .with_label_values(&["value-put-failed"]) - .observe(started.elapsed().as_secs_f64()); - } - }, - Some(KadQuery::GetProviders(key, started)) => { - log::debug!( - target: LOG_TARGET, - "`GET_PROVIDERS` ({query_id:?}) failed for key {key:?}" - ); - - self.event_streams.send(Event::Dht( - DhtEvent::ProvidersNotFound(key) - )); - - if let Some(ref metrics) = self.metrics { - metrics - .kademlia_query_duration - .with_label_values(&["providers-get-failed"]) - .observe(started.elapsed().as_secs_f64()); - } - }, - None => { - log::warn!( - target: LOG_TARGET, - "non-existent query failed ({query_id:?})", - ); } } } @@ -1034,7 +964,7 @@ impl NetworkBackend for Litep2pNetworkBac Some(DiscoveryEvent::IncomingRecord { record: Record { key, value, publisher, expires }} ) => { self.event_streams.send(Event::Dht( DhtEvent::PutRecordRequest( - key.into(), + libp2p::kad::RecordKey::new(&key), value, publisher.map(Into::into), expires, @@ -1056,15 +986,7 @@ impl NetworkBackend for 
Litep2pNetworkBac let direction = match endpoint { Endpoint::Dialer { .. } => "out", - Endpoint::Listener { .. } => { - // Increment incoming connections counter. - // - // Note: For litep2p these are represented by established negotiated connections, - // while for libp2p (legacy) these represent not-yet-negotiated connections. - metrics.incoming_connections_total.inc(); - - "in" - }, + Endpoint::Listener { .. } => "in", }; metrics.connections_opened_total.with_label_values(&[direction]).inc(); @@ -1136,7 +1058,6 @@ impl NetworkBackend for Litep2pNetworkBac NegotiationError::ParseError(_) => "parse-error", NegotiationError::IoError(_) => "io-error", NegotiationError::WebSocket(_) => "webscoket-error", - NegotiationError::BadSignature => "bad-signature", } }; @@ -1153,13 +1074,7 @@ impl NetworkBackend for Litep2pNetworkBac metrics.pending_connections_errors_total.with_label_values(&["transport-errors"]).inc(); } } - None => { - log::error!( - target: LOG_TARGET, - "Litep2p backend terminated" - ); - return - } + _ => {} }, } } diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index d270e90efdf5..693217f5ad94 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -32,15 +32,15 @@ use crate::{ RequestFailure, Signature, }; +use crate::litep2p::Record; use codec::DecodeAll; use futures::{channel::oneshot, stream::BoxStream}; -use libp2p::identity::SigningError; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; use litep2p::{ addresses::PublicAddresses, crypto::ed25519::Keypair, types::multiaddr::Multiaddr as LiteP2pMultiaddr, }; use parking_lot::RwLock; -use sc_network_types::kad::{Key as KademliaKey, Record}; use sc_network_common::{ role::{ObservedRole, Roles}, @@ -104,15 +104,6 @@ pub enum NetworkServiceCommand { expires: Option, }, - /// Start providing `key`. 
- StartProviding { key: KademliaKey }, - - /// Stop providing `key`. - StopProviding { key: KademliaKey }, - - /// Get providers for `key`. - GetProviders { key: KademliaKey }, - /// Query network status. Status { /// `oneshot::Sender` for sending the status. @@ -275,7 +266,12 @@ impl NetworkDHTProvider for Litep2pNetworkService { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::PutValue { key, value }); } - fn put_record_to(&self, record: Record, peers: HashSet, update_local_storage: bool) { + fn put_record_to( + &self, + record: libp2p::kad::Record, + peers: HashSet, + update_local_storage: bool, + ) { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::PutValueTo { record: Record { key: record.key.to_vec().into(), @@ -305,18 +301,6 @@ impl NetworkDHTProvider for Litep2pNetworkService { expires, }); } - - fn start_providing(&self, key: KademliaKey) { - let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StartProviding { key }); - } - - fn stop_providing(&self, key: KademliaKey) { - let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StopProviding { key }); - } - - fn get_providers(&self, key: KademliaKey) { - let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::GetProviders { key }); - } } #[async_trait::async_trait] diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index 146f2e4add97..bfd7a60ef9fe 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -320,7 +320,7 @@ impl RequestResponseProtocol { &mut self, peer: litep2p::PeerId, request_id: RequestId, - _fallback: Option, + fallback: Option, response: Vec, ) { match self.pending_inbound_responses.remove(&request_id) { @@ -337,7 +337,10 @@ impl RequestResponseProtocol { response.len(), ); - let _ = tx.send(Ok((response, self.protocol.clone()))); + let _ = tx.send(Ok(( + response, + 
fallback.map_or_else(|| self.protocol.clone(), Into::into), + ))); self.metrics.register_outbound_request_success(started.elapsed()); }, } diff --git a/substrate/client/network/src/network_state.rs b/substrate/client/network/src/network_state.rs index 65fd494739ee..cf8b8b55a7ff 100644 --- a/substrate/client/network/src/network_state.rs +++ b/substrate/client/network/src/network_state.rs @@ -106,7 +106,7 @@ pub enum Endpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address, role_override, port_use: _ } => + ConnectedPoint::Dialer { address, role_override } => Self::Dialing(address, role_override.into()), ConnectedPoint::Listener { local_addr, send_back_addr } => Self::Listening { local_addr, send_back_addr }, diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index a673f06fd622..21eeea6bcc0c 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -25,7 +25,7 @@ use either::Either; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ - core::{transport::PortUse, ConnectedPoint, Endpoint}, + core::{ConnectedPoint, Endpoint}, identify::{ Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent, Info as IdentifyInfo, @@ -38,8 +38,8 @@ use libp2p::{ ExternalAddrConfirmed, FromSwarm, ListenFailure, }, ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, - NetworkBehaviour, NewExternalAddrCandidate, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + NetworkBehaviour, NewExternalAddrCandidate, PollParameters, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; @@ -275,26 +275,23 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - port_use: PortUse, ) -> Result, ConnectionDenied> { let ping_handler = self.ping.handle_established_outbound_connection( connection_id, 
peer, addr, role_override, - port_use, )?; let identify_handler = self.identify.handle_established_outbound_connection( connection_id, peer, addr, role_override, - port_use, )?; Ok(ping_handler.select(identify_handler)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished( e @ ConnectionEstablished { peer_id, endpoint, .. }, @@ -322,21 +319,22 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer_id, connection_id, endpoint, - cause, + handler, remaining_established, }) => { + let (ping_handler, identity_handler) = handler.into_inner(); self.ping.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - cause, + handler: ping_handler, remaining_established, })); self.identify.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - cause, + handler: identity_handler, remaining_established, })); @@ -371,21 +369,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { send_back_addr, error, connection_id, - peer_id, }) => { self.ping.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, - peer_id, })); self.identify.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, - peer_id, })); }, FromSwarm::ListenerError(e) => { @@ -443,11 +438,6 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::NewListenAddr(e)); self.identify.on_swarm_event(FromSwarm::NewListenAddr(e)); }, - event => { - debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); - self.ping.on_swarm_event(event); - self.identify.on_swarm_event(event); - }, } } @@ -465,29 +455,47 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn poll(&mut self, cx: &mut Context) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll>> { if 
let Some(event) = self.pending_actions.pop_front() { return Poll::Ready(event) } loop { - match self.ping.poll(cx) { + match self.ping.poll(cx, params) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(ev)) => { if let PingEvent { peer, result: Ok(rtt), connection } = ev { self.handle_ping_report(&peer, rtt, connection) } }, - Poll::Ready(event) => { - return Poll::Ready(event.map_in(Either::Left).map_out(|_| { - unreachable!("`GenerateEvent` is handled in a branch above; qed") - })); - }, + Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), + Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event: Either::Left(event), + }), + Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } loop { - match self.identify.poll(cx) { + match self.identify.poll(cx, params) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(event)) => match event { IdentifyEvent::Received { peer_id, info, .. 
} => { @@ -495,20 +503,31 @@ impl NetworkBehaviour for PeerInfoBehaviour { let event = PeerInfoEvent::Identified { peer_id, info }; return Poll::Ready(ToSwarm::GenerateEvent(event)) }, - IdentifyEvent::Error { connection_id, peer_id, error } => { - debug!( - target: "sub-libp2p", - "Identification with peer {peer_id:?}({connection_id}) failed => {error}" - ); + IdentifyEvent::Error { peer_id, error } => { + debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) }, IdentifyEvent::Pushed { .. } => {}, IdentifyEvent::Sent { .. } => {}, }, - Poll::Ready(event) => { - return Poll::Ready(event.map_in(Either::Right).map_out(|_| { - unreachable!("`GenerateEvent` is handled in a branch above; qed") - })); - }, + Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), + Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event: Either::Right(event), + }), + Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 81e1848adefa..402baa7bb2a4 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -27,14 +27,14 @@ use crate::{ use codec::Encode; use libp2p::{ 
- core::{transport::PortUse, Endpoint}, + core::Endpoint, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; -use log::{debug, warn}; +use log::warn; use codec::DecodeAll; use sc_network_common::role::Roles; @@ -47,15 +47,14 @@ use notifications::{Notifications, NotificationsOut}; pub(crate) use notifications::ProtocolHandle; -pub use notifications::{notification_service, NotificationsSink, ProtocolHandlePair, Ready}; +pub use notifications::{ + notification_service, NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready, +}; mod notifications; pub mod message; -// Log target for this file. -const LOG_TARGET: &str = "sub-libp2p"; - /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. 
pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = MAX_RESPONSE_SIZE; @@ -127,10 +126,6 @@ impl Protocol { handle.set_metrics(notification_metrics.clone()); }); - protocol_configs.iter().enumerate().for_each(|(i, (p, _, _))| { - debug!(target: LOG_TARGET, "Notifications protocol {:?}: {}", SetId::from(i), p.name); - }); - ( Notifications::new( protocol_controller_handles, @@ -171,7 +166,7 @@ impl Protocol { { self.behaviour.disconnect_peer(peer_id, SetId::from(position)); } else { - warn!(target: LOG_TARGET, "disconnect_peer() with invalid protocol name") + warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } } @@ -255,14 +250,12 @@ impl NetworkBehaviour for Protocol { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - port_use: PortUse, ) -> Result, ConnectionDenied> { self.behaviour.handle_established_outbound_connection( connection_id, peer, addr, role_override, - port_use, ) } @@ -278,7 +271,7 @@ impl NetworkBehaviour for Protocol { Ok(Vec::new()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.behaviour.on_swarm_event(event); } @@ -294,15 +287,26 @@ impl NetworkBehaviour for Protocol { fn poll( &mut self, cx: &mut std::task::Context, + params: &mut impl PollParameters, ) -> Poll>> { - let event = match self.behaviour.poll(cx) { + let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, - Poll::Ready(event) => { - return Poll::Ready(event.map_out(|_| { - unreachable!("`GenerateEvent` is handled in a branch above; qed") - })); - }, + Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), + Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), + Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => + return Poll::Ready(ToSwarm::CloseConnection { 
peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; let outcome = match event { diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index 2691496234ad..10fa329097d1 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -21,7 +21,7 @@ pub use self::{ behaviour::{Notifications, NotificationsOut, ProtocolConfig}, - handler::{NotificationsSink, Ready}, + handler::{NotificationsSink, NotifsHandlerError, Ready}, service::{notification_service, ProtocolHandlePair}, }; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index e6909fcdefea..a562546145c8 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -33,11 +33,11 @@ use bytes::BytesMut; use fnv::FnvHashMap; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{transport::PortUse, Endpoint, Multiaddr}, + core::{Endpoint, Multiaddr}, swarm::{ behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, 
NotifyHandler, PollParameters, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -49,7 +49,6 @@ use smallvec::SmallVec; use tokio::sync::oneshot::error::RecvError; use tokio_stream::StreamMap; -use libp2p::swarm::CloseConnection; use std::{ cmp, collections::{hash_map::Entry, VecDeque}, @@ -1234,12 +1233,11 @@ impl NetworkBehaviour for Notifications { peer: PeerId, _addr: &Multiaddr, _role_override: Endpoint, - _port_use: PortUse, ) -> Result, ConnectionDenied> { Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -1672,9 +1670,6 @@ impl NetworkBehaviour for Notifications { FromSwarm::ExternalAddrConfirmed(_) => {}, FromSwarm::AddressChange(_) => {}, FromSwarm::NewListenAddr(_) => {}, - event => { - warn!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); - }, } } @@ -2222,19 +2217,14 @@ impl NetworkBehaviour for Notifications { ); } }, - NotifsHandlerOut::Close { protocol_index } => { - let set_id = SetId::from(protocol_index); - - trace!(target: "sub-libp2p", "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id); - self.events.push_back(ToSwarm::CloseConnection { - peer_id, - connection: CloseConnection::One(connection_id), - }); - }, } } - fn poll(&mut self, cx: &mut Context) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + _params: &mut impl PollParameters, + ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event) } @@ -2369,6 +2359,7 @@ impl NetworkBehaviour for Notifications { } #[cfg(test)] +#[allow(deprecated)] mod tests { use super::*; use crate::{ @@ -2395,6 +2386,17 @@ mod tests { } } + #[derive(Clone)] + struct MockPollParams {} + + impl PollParameters for MockPollParams { + type SupportedProtocolsIter = 
std::vec::IntoIter>; + + fn supported_protocols(&self) -> Self::SupportedProtocolsIter { + vec![].into_iter() + } + } + fn development_notifs( ) -> (Notifications, ProtocolController, Box) { @@ -2652,7 +2654,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -2852,7 +2854,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3005,7 +3007,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3049,7 +3051,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3119,7 +3121,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3267,7 +3269,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3393,7 +3395,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3467,7 +3469,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3530,7 +3532,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), 
remaining_established: 0usize, }, )); @@ -3544,7 +3546,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3598,7 +3600,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3656,7 +3658,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3717,7 +3719,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3786,7 +3788,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3827,7 +3829,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3950,7 +3952,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3970,9 +3972,11 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_some()); if tokio::time::timeout(Duration::from_secs(5), async { + let mut params = MockPollParams {}; + loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx); + let _ = notif.poll(cx, &mut params); Poll::Ready(()) }) .await; @@ -4076,9 +4080,11 @@ mod tests { // verify that the code continues to keep the peer disabled by resetting the timer // after the first one expired. 
if tokio::time::timeout(Duration::from_secs(5), async { + let mut params = MockPollParams {}; + loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx); + let _ = notif.poll(cx, &mut params); Poll::Ready(()) }) .await; @@ -4256,7 +4262,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4497,7 +4503,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4599,7 +4605,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4681,7 +4687,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4798,7 +4804,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4833,7 +4839,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4884,7 +4890,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4931,7 +4937,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), 
remaining_established: 0usize, }, )); @@ -4981,7 +4987,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5024,7 +5030,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5035,7 +5041,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - cause: None, + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 332de9f19c41..bff60ba1125f 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -74,12 +74,12 @@ use futures::{ }; use libp2p::{ swarm::{ - handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, Stream, + handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, SubstreamProtocol, }, PeerId, }; -use log::{error, warn}; +use log::error; use parking_lot::{Mutex, RwLock}; use std::{ collections::VecDeque, @@ -87,7 +87,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::Duration, + time::{Duration, Instant}, }; /// Number of pending notifications in asynchronous contexts. @@ -113,18 +113,16 @@ pub struct NotifsHandler { /// List of notification protocols, specified by the user at initialization. protocols: Vec, - /// Whether to keep connection alive - keep_alive: bool, - - /// Optional future that keeps connection alive for a certain amount of time. 
- // TODO: this should be safe to remove, see https://github.com/paritytech/polkadot-sdk/issues/6350 - keep_alive_timeout_future: Option + Send + 'static>>>, + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, /// Remote we are connected to. peer_id: PeerId, /// Events to return in priority from `poll`. - events_queue: VecDeque>, + events_queue: VecDeque< + ConnectionHandlerEvent, + >, /// Metrics. metrics: Option>, @@ -151,12 +149,7 @@ impl NotifsHandler { }) .collect(), peer_id, - // Keep connection alive initially until below timeout expires - keep_alive: true, - // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote - // to express desire to open substreams. - // TODO: This is a hack and ideally should not be necessary - keep_alive_timeout_future: Some(Box::pin(tokio::time::sleep(INITIAL_KEEPALIVE_TIME))), + when_connection_open: Instant::now(), events_queue: VecDeque::with_capacity(16), metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), } @@ -334,12 +327,6 @@ pub enum NotifsHandlerOut { /// Message that has been received. message: BytesMut, }, - - /// Close connection - Close { - /// Index of the protocol in the list of protocols passed at initialization. - protocol_index: usize, - }, } /// Sink connected directly to the node background task. Allows sending notifications to the peer. @@ -478,9 +465,17 @@ impl<'a> Ready<'a> { } } +/// Error specific to the collection of protocols. +#[derive(Debug, thiserror::Error)] +pub enum NotifsHandlerError { + #[error("Channel of synchronous notifications is full.")] + SyncNotificationsClogged, +} + impl ConnectionHandler for NotifsHandler { type FromBehaviour = NotifsHandlerIn; type ToBehaviour = NotifsHandlerOut; + type Error = NotifsHandlerError; type InboundProtocol = UpgradeCollec; type OutboundProtocol = NotificationsOut; // Index within the `out_protocols`. 
@@ -621,9 +616,6 @@ impl ConnectionHandler for NotifsHandler { State::Open { .. } => debug_assert!(false), }, ConnectionEvent::ListenUpgradeError(_listen_upgrade_error) => {}, - event => { - warn!(target: "sub-libp2p", "New unknown `ConnectionEvent` libp2p event: {event:?}"); - }, } } @@ -719,36 +711,35 @@ impl ConnectionHandler for NotifsHandler { } } - fn connection_keep_alive(&self) -> bool { + fn connection_keep_alive(&self) -> KeepAlive { // `Yes` if any protocol has some activity. if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { - return true; + return KeepAlive::Yes } - self.keep_alive + // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote + // to express desire to open substreams. + #[allow(deprecated)] + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) } + #[allow(deprecated)] fn poll( &mut self, cx: &mut Context, ) -> Poll< - ConnectionHandlerEvent, + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::ToBehaviour, + Self::Error, + >, > { - { - let maybe_keep_alive_timeout_future = &mut self.keep_alive_timeout_future; - if let Some(keep_alive_timeout_future) = maybe_keep_alive_timeout_future { - if keep_alive_timeout_future.poll_unpin(cx).is_ready() { - maybe_keep_alive_timeout_future.take(); - self.keep_alive = false; - } - } - } - if let Some(ev) = self.events_queue.pop_front() { return Poll::Ready(ev) } - // For each open substream, try to send messages from `notifications_sink_rx` to the + // For each open substream, try send messages from `notifications_sink_rx` to the // substream. for protocol_index in 0..self.protocols.len() { if let State::Open { @@ -759,10 +750,11 @@ impl ConnectionHandler for NotifsHandler { // Only proceed with `out_substream.poll_ready_unpin` if there is an element // available in `notifications_sink_rx`. 
This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. + #[allow(deprecated)] match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - NotifsHandlerOut::Close { protocol_index }, + return Poll::Ready(ConnectionHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged, )), Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, Poll::Ready(None) | Poll::Pending => break, @@ -983,17 +975,6 @@ pub mod tests { rx_buffer: BytesMut, } - /// Mirror of `ActiveStreamCounter` in `libp2p` - #[allow(dead_code)] - struct MockActiveStreamCounter(Arc<()>); - - // Mirror of `Stream` in `libp2p` - #[allow(dead_code)] - struct MockStream { - stream: Negotiated, - counter: Option, - } - impl MockSubstream { /// Create new substream pair. pub fn new() -> (Self, Self) { @@ -1023,11 +1004,16 @@ pub mod tests { /// Unsafe substitute for `Stream::new` private constructor. fn stream_new(stream: Negotiated) -> Stream { - let stream = MockStream { stream, counter: None }; // Static asserts to make sure this doesn't break. const _: () = { - assert!(core::mem::size_of::() == core::mem::size_of::()); - assert!(core::mem::align_of::() == core::mem::align_of::()); + assert!( + core::mem::size_of::() == + core::mem::size_of::>() + ); + assert!( + core::mem::align_of::() == + core::mem::align_of::>() + ); }; unsafe { core::mem::transmute(stream) } @@ -1098,16 +1084,24 @@ pub mod tests { /// Create new [`NotifsHandler`]. 
fn notifs_handler() -> NotifsHandler { - NotifsHandler::new( - PeerId::random(), - vec![ProtocolConfig { + let proto = Protocol { + config: ProtocolConfig { name: "/foo".into(), fallback_names: vec![], handshake: Arc::new(RwLock::new(b"hello, world".to_vec())), max_notification_size: u64::MAX, - }], - None, - ) + }, + in_upgrade: NotificationsIn::new("/foo", Vec::new(), u64::MAX), + state: State::Closed { pending_opening: false }, + }; + + NotifsHandler { + protocols: vec![proto], + when_connection_open: Instant::now(), + peer_id: PeerId::random(), + events_queue: VecDeque::new(), + metrics: None, + } } // verify that if another substream is attempted to be opened by remote while an inbound @@ -1614,11 +1608,12 @@ pub mod tests { notifications_sink.send_sync_notification(vec![1, 3, 3, 9]); notifications_sink.send_sync_notification(vec![1, 3, 4, 0]); + #[allow(deprecated)] futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - NotifsHandlerOut::Close { .. 
} + Poll::Ready(ConnectionHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged, )) )); Poll::Ready(()) diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index 50f03b5911b6..a8eeb2bb1980 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -30,25 +30,30 @@ use crate::{ use futures::{future::BoxFuture, prelude::*}; use libp2p::{ - core::{ - transport::{MemoryTransport, PortUse}, - upgrade, Endpoint, - }, + core::{transport::MemoryTransport, upgrade, Endpoint}, identity, noise, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, Swarm, SwarmEvent, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + self, behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, + PollParameters, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, - yamux, Multiaddr, PeerId, SwarmBuilder, Transport, + yamux, Multiaddr, PeerId, Transport, }; use sc_utils::mpsc::tracing_unbounded; use std::{ iter, + pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, }; +struct TokioExecutor(tokio::runtime::Runtime); +impl Executor for TokioExecutor { + fn exec(&self, f: Pin + Send>>) { + let _ = self.0.spawn(f); + } +} + /// Builds two nodes that have each other as bootstrap nodes. /// This is to be used only for testing, and a panic will happen if something goes wrong. 
fn build_nodes() -> (Swarm, Swarm) { @@ -62,6 +67,13 @@ fn build_nodes() -> (Swarm, Swarm) { for index in 0..2 { let keypair = keypairs[index].clone(); + let transport = MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed(); + let (protocol_handle_pair, mut notif_service) = crate::protocol::notifications::service::notification_service("/foo".into()); // The first swarm has the second peer ID present in the peerstore. @@ -90,8 +102,39 @@ fn build_nodes() -> (Swarm, Swarm) { ); let (notif_handle, command_stream) = protocol_handle_pair.split(); + let behaviour = CustomProtoWithAddr { + inner: Notifications::new( + vec![controller_handle], + from_controller, + NotificationMetrics::new(None), + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }, + notif_handle, + command_stream, + )), + ), + peer_store_future: peer_store.run().boxed(), + protocol_controller_future: controller.run().boxed(), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| { + if n != index { + Some((keypairs[n].public().to_peer_id(), a.clone())) + } else { + None + } + }) + .collect(), + }; - tokio::spawn(async move { + let runtime = tokio::runtime::Runtime::new().unwrap(); + runtime.spawn(async move { loop { if let NotificationEvent::ValidateInboundSubstream { result_tx, .. 
} = notif_service.next_event().await.unwrap() @@ -101,49 +144,12 @@ fn build_nodes() -> (Swarm, Swarm) { } }); - let mut swarm = SwarmBuilder::with_existing_identity(keypair) - .with_tokio() - .with_other_transport(|keypair| { - MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .timeout(Duration::from_secs(20)) - .boxed() - }) - .unwrap() - .with_behaviour(|_keypair| CustomProtoWithAddr { - inner: Notifications::new( - vec![controller_handle], - from_controller, - NotificationMetrics::new(None), - iter::once(( - ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: Vec::new(), - max_notification_size: 1024 * 1024, - }, - notif_handle, - command_stream, - )), - ), - peer_store_future: peer_store.run().boxed(), - protocol_controller_future: controller.run().boxed(), - addrs: addrs - .iter() - .enumerate() - .filter_map(|(n, a)| { - if n != index { - Some((keypairs[n].public().to_peer_id(), a.clone())) - } else { - None - } - }) - .collect(), - }) - .unwrap() - .build(); + let mut swarm = Swarm::new( + transport, + behaviour, + keypairs[index].public().to_peer_id(), + swarm::Config::with_executor(TokioExecutor(runtime)), + ); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -235,18 +241,12 @@ impl NetworkBehaviour for CustomProtoWithAddr { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - port_use: PortUse, ) -> Result, ConnectionDenied> { - self.inner.handle_established_outbound_connection( - connection_id, - peer, - addr, - role_override, - port_use, - ) + self.inner + .handle_established_outbound_connection(connection_id, peer, addr, role_override) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.inner.on_swarm_event(event); } @@ -259,15 +259,19 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.on_connection_handler_event(peer_id, 
connection_id, event); } - fn poll(&mut self, cx: &mut Context) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll>> { let _ = self.peer_store_future.poll_unpin(cx); let _ = self.protocol_controller_future.poll_unpin(cx); - self.inner.poll(cx) + self.inner.poll(cx, params) } } -#[tokio::test] -async fn reconnect_after_disconnect() { +#[test] +fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. @@ -284,106 +288,108 @@ async fn reconnect_after_disconnect() { let mut service1_state = ServiceState::NotConnected; let mut service2_state = ServiceState::NotConnected; - loop { - // Grab next event from services. - let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(s1, s2).await { - future::Either::Left((ev, _)) => future::Either::Left(ev), - future::Either::Right((ev, _)) => future::Either::Right(ev), - } - }; - - match event { - future::Either::Left(SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { - .. - })) => match service1_state { - ServiceState::NotConnected => { - service1_state = ServiceState::FirstConnec; - if service2_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } + futures::executor::block_on(async move { + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), + } + }; + + match event { + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. 
}, + )) => match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), }, - ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Left(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. }, - )) => match service1_state { - ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolOpen { .. }, - )) => match service2_state { - ServiceState::NotConnected => { - service2_state = ServiceState::FirstConnec; - if service1_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), }, - ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. 
}, - )) => match service2_state { - ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - _ => {}, - } + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. }, + )) => match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + _ => {}, + } - // Due to the bug in `Notifications`, the disconnected node does not always detect that - // it was disconnected. The closed inbound substream is tolerated by design, and the - // closed outbound substream is not detected until something is sent into it. - // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). - // This happens if the disconnecting node reconnects to it fast enough. - // In this case the disconnected node does not transit via `ServiceState::NotConnected` - // and stays in `ServiceState::FirstConnec`. - // TODO: update this once the fix is finally merged. 
- if service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::ConnectedAgain || - service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::FirstConnec || - service1_state == ServiceState::FirstConnec && - service2_state == ServiceState::ConnectedAgain - { - break + // Due to the bug in `Notifications`, the disconnected node does not always detect that + // it was disconnected. The closed inbound substream is tolerated by design, and the + // closed outbound substream is not detected until something is sent into it. + // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). + // This happens if the disconnecting node reconnects to it fast enough. + // In this case the disconnected node does not transit via `ServiceState::NotConnected` + // and stays in `ServiceState::FirstConnec`. + // TODO: update this once the fix is finally merged. + if service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::ConnectedAgain || + service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::FirstConnec || + service1_state == ServiceState::FirstConnec && + service2_state == ServiceState::ConnectedAgain + { + break + } } - } - // Now that the two services have disconnected and reconnected, wait for 3 seconds and - // check whether they're still connected. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - - loop { - // Grab next event from services. 
- let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(future::select(s1, s2), &mut delay).await { - future::Either::Right(_) => break, // success - future::Either::Left((future::Either::Left((ev, _)), _)) => ev, - future::Either::Left((future::Either::Right((ev, _)), _)) => ev, - } - }; + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, + } + }; - match event { - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), - _ => {}, + match event { + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. 
}) => panic!(), + _ => {}, + } } - } + }); } diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index 9e8a03fc07c9..e01bcbe0bad7 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -39,12 +39,12 @@ use crate::types::ProtocolName; use asynchronous_codec::Framed; use bytes::BytesMut; use futures::prelude::*; -use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::{error, warn}; use unsigned_varint::codec::UviBytes; use std::{ - fmt, io, mem, + io, mem, pin::Pin, task::{Context, Poll}, vec, @@ -187,14 +187,6 @@ pub struct NotificationsInOpen { pub substream: NotificationsInSubstream, } -impl fmt::Debug for NotificationsInOpen { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NotificationsInOpen") - .field("handshake", &self.handshake) - .finish_non_exhaustive() - } -} - impl NotificationsInSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, @@ -378,14 +370,7 @@ where fn upgrade_outbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { - { - let mut len_data = unsigned_varint::encode::usize_buffer(); - let encoded_len = - unsigned_varint::encode::usize(self.initial_message.len(), &mut len_data).len(); - socket.write_all(&len_data[..encoded_len]).await?; - } - socket.write_all(&self.initial_message).await?; - socket.flush().await?; + upgrade::write_length_prefixed(&mut socket, &self.initial_message).await?; // Reading handshake. 
let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; @@ -428,15 +413,6 @@ pub struct NotificationsOutOpen { pub substream: NotificationsOutSubstream, } -impl fmt::Debug for NotificationsOutOpen { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NotificationsOutOpen") - .field("handshake", &self.handshake) - .field("negotiated_fallback", &self.negotiated_fallback) - .finish_non_exhaustive() - } -} - impl Sink> for NotificationsOutSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 11f5321294d0..af7adb50907f 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -464,7 +464,7 @@ impl ProtocolController { /// maintain connections with such peers. fn on_add_reserved_peer(&mut self, peer_id: PeerId) { if self.reserved_nodes.contains_key(&peer_id) { - debug!( + warn!( target: LOG_TARGET, "Trying to add an already reserved node {peer_id} as reserved on {:?}.", self.set_id, diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index 5fe34c781378..6c2631924df4 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -43,11 +43,13 @@ use crate::{ use futures::{channel::oneshot, prelude::*}; use libp2p::{ - core::{transport::PortUse, Endpoint, Multiaddr}, + core::{Endpoint, Multiaddr}, request_response::{self, Behaviour, Codec, Message, ProtocolSupport, ResponseChannel}, swarm::{ - behaviour::FromSwarm, handler::multi::MultiHandler, ConnectionDenied, ConnectionId, - NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, FromSwarm}, + handler::multi::MultiHandler, + ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, THandler, + 
THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -62,11 +64,11 @@ use std::{ time::{Duration, Instant}, }; -pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId}; +pub use libp2p::request_response::{Config, RequestId}; /// Possible failures occurring in the context of sending an outbound request and receiving the /// response. -#[derive(Debug, Clone, thiserror::Error)] +#[derive(Debug, thiserror::Error)] pub enum OutboundFailure { /// The request could not be sent because a dialing attempt failed. #[error("Failed to dial the requested peer")] @@ -80,9 +82,6 @@ pub enum OutboundFailure { /// The remote supports none of the requested protocols. #[error("The remote supports none of the requested protocols")] UnsupportedProtocols, - /// An IO failure happened on an outbound stream. - #[error("An IO failure happened on an outbound stream")] - Io(Arc), } impl From for OutboundFailure { @@ -94,7 +93,6 @@ impl From for OutboundFailure { OutboundFailure::ConnectionClosed, request_response::OutboundFailure::UnsupportedProtocols => OutboundFailure::UnsupportedProtocols, - request_response::OutboundFailure::Io(error) => OutboundFailure::Io(Arc::new(error)), } } } @@ -116,9 +114,6 @@ pub enum InboundFailure { /// The local peer failed to respond to an inbound request #[error("The response channel was dropped without sending a response to the remote")] ResponseOmission, - /// An IO failure happened on an inbound stream. - #[error("An IO failure happened on an inbound stream")] - Io(Arc), } impl From for InboundFailure { @@ -129,7 +124,6 @@ impl From for InboundFailure { request_response::InboundFailure::ConnectionClosed => InboundFailure::ConnectionClosed, request_response::InboundFailure::UnsupportedProtocols => InboundFailure::UnsupportedProtocols, - request_response::InboundFailure::Io(error) => InboundFailure::Io(Arc::new(error)), } } } @@ -325,12 +319,12 @@ pub enum Event { /// requests. 
There is no uniqueness guarantee in a set of both inbound and outbound /// [`ProtocolRequestId`]s. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -struct ProtocolRequestId { +struct ProtocolRequestId { protocol: ProtocolName, request_id: RequestId, } -impl From<(ProtocolName, RequestId)> for ProtocolRequestId { +impl From<(ProtocolName, RequestId)> for ProtocolRequestId { fn from((protocol, request_id): (ProtocolName, RequestId)) -> Self { Self { protocol, request_id } } @@ -348,7 +342,7 @@ pub struct RequestResponsesBehaviour { >, /// Pending requests, passed down to a request-response [`Behaviour`], awaiting a reply. - pending_requests: HashMap, PendingRequest>, + pending_requests: HashMap, /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the /// start time and the response to send back to the remote. @@ -357,11 +351,11 @@ pub struct RequestResponsesBehaviour { >, /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. - pending_responses_arrival_time: HashMap, Instant>, + pending_responses_arrival_time: HashMap, /// Whenever a response is received on `pending_responses`, insert a channel to be notified /// when the request has been sent out. - send_feedback: HashMap, oneshot::Sender<()>>, + send_feedback: HashMap>, /// Primarily used to get a reputation of a node. peer_store: Arc, @@ -370,7 +364,7 @@ pub struct RequestResponsesBehaviour { /// Generated by the response builder and waiting to be processed. 
struct RequestProcessingOutcome { peer: PeerId, - request_id: InboundRequestId, + request_id: RequestId, protocol: ProtocolName, inner_channel: ResponseChannel, ()>>, response: OutgoingResponse, @@ -385,7 +379,8 @@ impl RequestResponsesBehaviour { ) -> Result { let mut protocols = HashMap::new(); for protocol in list { - let cfg = Config::default().with_request_timeout(protocol.request_timeout); + let mut cfg = Config::default(); + cfg.set_request_timeout(protocol.request_timeout); let protocol_support = if protocol.inbound_queue.is_some() { ProtocolSupport::Full @@ -460,7 +455,7 @@ impl RequestResponsesBehaviour { fn send_request_inner( behaviour: &mut Behaviour, - pending_requests: &mut HashMap, PendingRequest>, + pending_requests: &mut HashMap, target: &PeerId, protocol_name: ProtocolName, request: Vec, @@ -546,16 +541,11 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - port_use: PortUse, ) -> Result, ConnectionDenied> { let iter = self.protocols.iter_mut().filter_map(|(p, (r, _))| { - if let Ok(handler) = r.handle_established_outbound_connection( - connection_id, - peer, - addr, - role_override, - port_use, - ) { + if let Ok(handler) = + r.handle_established_outbound_connection(connection_id, peer, addr, role_override) + { Some((p.to_string(), handler)) } else { None @@ -568,9 +558,80 @@ impl NetworkBehaviour for RequestResponsesBehaviour { )) } - fn on_swarm_event(&mut self, event: FromSwarm) { - for (protocol, _) in self.protocols.values_mut() { - protocol.on_swarm_event(event); + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionEstablished(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ConnectionEstablished(e)); + }, + FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + connection_id, + endpoint, + handler, + remaining_established, + }) => + for (p_name, p_handler) in handler.into_iter() { + 
if let Some((proto, _)) = self.protocols.get_mut(p_name.as_str()) { + proto.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + connection_id, + endpoint, + handler: p_handler, + remaining_established, + })); + } else { + log::error!( + target: "sub-libp2p", + "on_swarm_event/connection_closed: no request-response instance registered for protocol {:?}", + p_name, + ) + } + }, + FromSwarm::DialFailure(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::DialFailure(e)); + }, + FromSwarm::ListenerClosed(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerClosed(e)); + }, + FromSwarm::ListenFailure(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenFailure(e)); + }, + FromSwarm::ListenerError(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerError(e)); + }, + FromSwarm::ExternalAddrExpired(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrExpired(e)); + }, + FromSwarm::NewListener(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListener(e)); + }, + FromSwarm::ExpiredListenAddr(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredListenAddr(e)); + }, + FromSwarm::NewExternalAddrCandidate(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddrCandidate(e)); + }, + FromSwarm::ExternalAddrConfirmed(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrConfirmed(e)); + }, + FromSwarm::AddressChange(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::AddressChange(e)); + }, + 
FromSwarm::NewListenAddr(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListenAddr(e)); + }, } } @@ -592,7 +653,11 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn poll(&mut self, cx: &mut Context) -> Poll>> { + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll>> { 'poll_all: loop { // Poll to see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { @@ -642,7 +707,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Poll request-responses protocols. for (protocol, (ref mut behaviour, ref mut resp_builder)) in &mut self.protocols { - 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx) { + 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx, params) { let ev = match ev { // Main events we are interested in. ToSwarm::GenerateEvent(ev) => ev, @@ -652,23 +717,29 @@ impl NetworkBehaviour for RequestResponsesBehaviour { ToSwarm::Dial { opts } => { if opts.get_peer_id().is_none() { log::error!( - target: "sub-libp2p", "The request-response isn't supposed to start dialing addresses" ); } return Poll::Ready(ToSwarm::Dial { opts }) }, - event => { - return Poll::Ready( - event.map_in(|event| ((*protocol).to_string(), event)).map_out( - |_| { - unreachable!( - "`GenerateEvent` is handled in a branch above; qed" - ) - }, - ), - ); - }, + ToSwarm::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(ToSwarm::NotifyHandler { + peer_id, + handler, + event: ((*protocol).to_string(), event), + }), + ToSwarm::CloseConnection { peer_id, connection } => + return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + 
ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => + return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; match ev { @@ -788,7 +859,6 @@ impl NetworkBehaviour for RequestResponsesBehaviour { error, .. } => { - let error = OutboundFailure::from(error); let started = match self .pending_requests .remove(&(protocol.clone(), request_id).into()) @@ -800,7 +870,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { }) => { // Try using the fallback request if the protocol was not // supported. - if matches!(error, OutboundFailure::UnsupportedProtocols) { + if let request_response::OutboundFailure::UnsupportedProtocols = + error + { if let Some((fallback_request, fallback_protocol)) = fallback_request { @@ -821,7 +893,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } if response_tx - .send(Err(RequestFailure::Network(error.clone()))) + .send(Err(RequestFailure::Network(error.clone().into()))) .is_err() { log::debug!( @@ -848,7 +920,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer, protocol: protocol.clone(), duration: started.elapsed(), - result: Err(RequestFailure::Network(error)), + result: Err(RequestFailure::Network(error.into())), }; return Poll::Ready(ToSwarm::GenerateEvent(out)) @@ -1112,10 +1184,7 @@ mod tests { transport, behaviour, keypair.public().to_peer_id(), - SwarmConfig::with_executor(TokioExecutor(runtime)) - // This is taken care of by notification protocols in non-test environment - // It is very slow in test environment for some reason, hence larger timeout - .with_idle_connection_timeout(Duration::from_secs(10)), + SwarmConfig::with_executor(TokioExecutor(runtime)), ); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); @@ -1285,9 +1354,7 @@ mod tests { match swarm.select_next_some().await { 
SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); - }, - SwarmEvent::ConnectionClosed { .. } => { - break; + break }, _ => {}, } @@ -1327,20 +1394,20 @@ mod tests { } match response_receiver.unwrap().await.unwrap().unwrap_err() { - RequestFailure::Network(OutboundFailure::Io(_)) => {}, - request_failure => panic!("Unexpected failure: {request_failure:?}"), + RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, + _ => panic!(), } }); } - /// A `RequestId` is a unique identifier among either all inbound or all outbound requests for + /// A [`RequestId`] is a unique identifier among either all inbound or all outbound requests for /// a single [`RequestResponsesBehaviour`] behaviour. It is not guaranteed to be unique across - /// multiple [`RequestResponsesBehaviour`] behaviours. Thus, when handling `RequestId` in the + /// multiple [`RequestResponsesBehaviour`] behaviours. Thus when handling [`RequestId`] in the /// context of multiple [`RequestResponsesBehaviour`] behaviours, one needs to couple the - /// protocol name with the `RequestId` to get a unique request identifier. + /// protocol name with the [`RequestId`] to get a unique request identifier. /// /// This test ensures that two requests on different protocols can be handled concurrently - /// without a `RequestId` collision. + /// without a [`RequestId`] collision. /// /// See [`ProtocolRequestId`] for additional information. 
#[test] diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 751183ae19a9..71d0b45aa06d 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -41,7 +41,7 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, peer_store::{PeerStore, PeerStoreProvider}, - protocol::{self, Protocol, Ready}, + protocol::{self, NotifsHandlerError, Protocol, Ready}, protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId}, request_responses::{IfDisconnected, ProtocolConfig as RequestResponseConfig, RequestFailure}, service::{ @@ -59,12 +59,16 @@ use crate::{ }; use codec::DecodeAll; +use either::Either; use futures::{channel::oneshot, prelude::*}; +#[allow(deprecated)] +use libp2p::swarm::THandlerErr; use libp2p::{ connection_limits::{ConnectionLimits, Exceeded}, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, identity::ed25519, + kad::{record::Key as KademliaKey, Record}, multiaddr::{self, Multiaddr}, swarm::{ Config as SwarmConfig, ConnectionError, ConnectionId, DialError, Executor, ListenError, @@ -76,7 +80,6 @@ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, MetricSources, Metrics}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use sc_network_types::kad::{Key as KademliaKey, Record}; use sc_client_api::BlockBackend; use sc_network_common::{ @@ -91,6 +94,7 @@ pub use libp2p::identity::{DecodingError, Keypair, PublicKey}; pub use metrics::NotificationMetrics; pub use protocol::NotificationsSink; use std::{ + cmp, collections::{HashMap, HashSet}, fs, iter, marker::PhantomData, @@ -111,7 +115,6 @@ pub mod signature; pub mod traits; struct Libp2pBandwidthSink { - #[allow(deprecated)] sink: Arc, } @@ -333,7 +336,7 @@ where "🏷 Local node identity is: {}", local_peer_id.to_base58(), ); - info!(target: "sub-libp2p", "Running libp2p network backend"); + 
log::info!(target: "sub-libp2p", "Running libp2p network backend"); let (transport, bandwidth) = { let config_mem = match network_config.transport { @@ -341,7 +344,46 @@ where TransportConfig::Normal { .. } => false, }; - transport::build_transport(local_identity.clone().into(), config_mem) + // The yamux buffer size limit is configured to be equal to the maximum frame size + // of all protocols. 10 bytes are added to each limit for the length prefix that + // is not included in the upper layer protocols limit but is still present in the + // yamux buffer. These 10 bytes correspond to the maximum size required to encode + // a variable-length-encoding 64bits number. In other words, we make the + // assumption that no notification larger than 2^64 will ever be sent. + let yamux_maximum_buffer_size = { + let requests_max = request_response_protocols + .iter() + .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); + let responses_max = request_response_protocols + .iter() + .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); + let notifs_max = notification_protocols + .iter() + .map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX)); + + // A "default" max is added to cover all the other protocols: ping, identify, + // kademlia, block announces, and transactions. 
+ let default_max = cmp::max( + 1024 * 1024, + usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) + .unwrap_or(usize::MAX), + ); + + iter::once(default_max) + .chain(requests_max) + .chain(responses_max) + .chain(notifs_max) + .max() + .expect("iterator known to always yield at least one element; qed") + .saturating_add(10) + }; + + transport::build_transport( + local_identity.clone().into(), + config_mem, + network_config.yamux_window_size, + yamux_maximum_buffer_size, + ) }; let (to_notifications, from_protocol_controllers) = @@ -931,18 +973,6 @@ where expires, )); } - - fn start_providing(&self, key: KademliaKey) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StartProviding(key)); - } - - fn stop_providing(&self, key: KademliaKey) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StopProviding(key)); - } - - fn get_providers(&self, key: KademliaKey) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetProviders(key)); - } } #[async_trait::async_trait] @@ -1303,9 +1333,6 @@ enum ServiceToWorkerMsg { update_local_storage: bool, }, StoreRecord(KademliaKey, Vec, Option, Option), - StartProviding(KademliaKey), - StopProviding(KademliaKey), - GetProviders(KademliaKey), AddKnownAddress(PeerId, Multiaddr), EventStream(out_events::Sender), Request { @@ -1428,23 +1455,17 @@ where fn handle_worker_message(&mut self, msg: ServiceToWorkerMsg) { match msg { ServiceToWorkerMsg::GetValue(key) => - self.network_service.behaviour_mut().get_value(key.into()), + self.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => - self.network_service.behaviour_mut().put_value(key.into(), value), + self.network_service.behaviour_mut().put_value(key, value), ServiceToWorkerMsg::PutRecordTo { record, peers, update_local_storage } => self .network_service .behaviour_mut() - .put_record_to(record.into(), peers, update_local_storage), + .put_record_to(record, peers, update_local_storage), 
ServiceToWorkerMsg::StoreRecord(key, value, publisher, expires) => self .network_service .behaviour_mut() - .store_record(key.into(), value, publisher, expires), - ServiceToWorkerMsg::StartProviding(key) => - self.network_service.behaviour_mut().start_providing(key.into()), - ServiceToWorkerMsg::StopProviding(key) => - self.network_service.behaviour_mut().stop_providing(&key.into()), - ServiceToWorkerMsg::GetProviders(key) => - self.network_service.behaviour_mut().get_providers(key.into()), + .store_record(key, value, publisher, expires), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => self.network_service.behaviour_mut().add_known_address(peer_id, addr), ServiceToWorkerMsg::EventStream(sender) => self.event_streams.push(sender), @@ -1480,7 +1501,8 @@ where } /// Process the next event coming from `Swarm`. - fn handle_swarm_event(&mut self, event: SwarmEvent) { + #[allow(deprecated)] + fn handle_swarm_event(&mut self, event: SwarmEvent>>) { match event { SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
}) => { if let Some(metrics) = self.metrics.as_ref() { @@ -1505,7 +1527,6 @@ where Some("busy-omitted"), ResponseFailure::Network(InboundFailure::ConnectionClosed) => Some("connection-closed"), - ResponseFailure::Network(InboundFailure::Io(_)) => Some("io"), }; if let Some(reason) = reason { @@ -1545,7 +1566,6 @@ where "connection-closed", RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => "unsupported", - RequestFailure::Network(OutboundFailure::Io(_)) => "io", }; metrics @@ -1658,9 +1678,6 @@ where DhtEvent::ValuePut(_) => "value-put", DhtEvent::ValuePutFailed(_) => "value-put-failed", DhtEvent::PutRecordRequest(_, _, _, _) => "put-record-request", - DhtEvent::StartProvidingFailed(_) => "start-providing-failed", - DhtEvent::ProvidersFound(_, _) => "providers-found", - DhtEvent::ProvidersNotFound(_) => "providers-not-found", }; metrics .kademlia_query_duration @@ -1715,6 +1732,15 @@ where }; let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", + Some(ConnectionError::Handler(Either::Left(Either::Left( + Either::Left(Either::Right( + NotifsHandlerError::SyncNotificationsClogged, + )), + )))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(Either::Left(Either::Left( + Either::Right(Either::Left(_)), + )))) => "ping-timeout", + Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", None => "actively-closed", }; @@ -1753,12 +1779,7 @@ where not_reported.then(|| self.boot_node_ids.get(&peer_id)).flatten() { if let DialError::WrongPeerId { obtained, endpoint } = &error { - if let ConnectedPoint::Dialer { - address, - role_override: _, - port_use: _, - } = endpoint - { + if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { let address_without_peer_id = parse_addr(address.clone().into()) .map_or_else(|_| address.clone(), |r| r.1.into()); @@ -1779,6 +1800,7 @@ where } if let Some(metrics) = self.metrics.as_ref() { + 
#[allow(deprecated)] let reason = match error { DialError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1818,6 +1840,7 @@ where "Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}" ); if let Some(metrics) = self.metrics.as_ref() { + #[allow(deprecated)] let reason = match error { ListenError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1870,21 +1893,6 @@ where metrics.listeners_errors_total.inc(); } }, - SwarmEvent::NewExternalAddrCandidate { address } => { - trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrCandidate: {address:?}"); - }, - SwarmEvent::ExternalAddrConfirmed { address } => { - trace!(target: "sub-libp2p", "Libp2p => ExternalAddrConfirmed: {address:?}"); - }, - SwarmEvent::ExternalAddrExpired { address } => { - trace!(target: "sub-libp2p", "Libp2p => ExternalAddrExpired: {address:?}"); - }, - SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { - trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}") - }, - event => { - warn!(target: "sub-libp2p", "New unknown SwarmEvent libp2p event: {event:?}"); - }, } } } diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index acfed9ea894c..bd4f83c7fd44 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -32,15 +32,12 @@ use crate::{ }; use futures::{channel::oneshot, Stream}; +use libp2p::kad::Record; use prometheus_endpoint::Registry; use sc_client_api::BlockBackend; use sc_network_common::{role::ObservedRole, ExHashT}; -pub use sc_network_types::{ - kad::{Key as KademliaKey, Record}, - multiaddr::Multiaddr, - PeerId, -}; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use sp_runtime::traits::Block as BlockT; use std::{ @@ -52,7 +49,7 @@ use std::{ time::{Duration, Instant}, }; -pub use libp2p::identity::SigningError; +pub use libp2p::{identity::SigningError, 
kad::record::Key as KademliaKey}; /// Supertrait defining the services provided by [`NetworkBackend`] service handle. pub trait NetworkService: @@ -234,15 +231,6 @@ pub trait NetworkDHTProvider { publisher: Option, expires: Option, ); - - /// Register this node as a provider for `key` on the DHT. - fn start_providing(&self, key: KademliaKey); - - /// Deregister this node as a provider for `key` on the DHT. - fn stop_providing(&self, key: KademliaKey); - - /// Start getting the list of providers for `key` on the DHT. - fn get_providers(&self, key: KademliaKey); } impl NetworkDHTProvider for Arc @@ -271,18 +259,6 @@ where ) { T::store_record(self, key, value, publisher, expires) } - - fn start_providing(&self, key: KademliaKey) { - T::start_providing(self, key) - } - - fn stop_providing(&self, key: KademliaKey) { - T::stop_providing(self, key) - } - - fn get_providers(&self, key: KademliaKey) { - T::get_providers(self, key) - } } /// Provides an ability to set a fork sync request for a particular block. diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index 2f6b7a643c48..ed7e7c574e16 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -29,8 +29,6 @@ use libp2p::{ }; use std::{sync::Arc, time::Duration}; -// TODO: Create a wrapper similar to upstream `BandwidthTransport` that tracks sent/received bytes -#[allow(deprecated)] pub use libp2p::bandwidth::BandwidthSinks; /// Builds the transport that serves as a common ground for all connections. @@ -38,12 +36,21 @@ pub use libp2p::bandwidth::BandwidthSinks; /// If `memory_only` is true, then only communication within the same process are allowed. Only /// addresses with the format `/memory/...` are allowed. /// +/// `yamux_window_size` is the maximum size of the Yamux receive windows. `None` to leave the +/// default (256kiB). +/// +/// `yamux_maximum_buffer_size` is the maximum allowed size of the Yamux buffer. 
This should be +/// set either to the maximum of all the maximum allowed sizes of messages frames of all +/// high-level protocols combined, or to some generously high value if you are sure that a maximum +/// size is enforced on all high-level protocols. +/// /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. -#[allow(deprecated)] pub fn build_transport( keypair: identity::Keypair, memory_only: bool, + yamux_window_size: Option, + yamux_maximum_buffer_size: usize, ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. let transport = if !memory_only { @@ -74,7 +81,19 @@ pub fn build_transport( }; let authentication_config = noise::Config::new(&keypair).expect("Can create noise config. qed"); - let multiplexing_config = libp2p::yamux::Config::default(); + let multiplexing_config = { + let mut yamux_config = libp2p::yamux::Config::default(); + // Enable proper flow-control: window updates are only sent when + // buffered data has been consumed. + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + yamux_config.set_max_buffer_size(yamux_maximum_buffer_size); + + if let Some(yamux_window_size) = yamux_window_size { + yamux_config.set_receive_window_size(yamux_window_size); + } + + yamux_config + }; let transport = transport .upgrade(upgrade::Version::V1Lazy) diff --git a/substrate/client/network/src/types.rs b/substrate/client/network/src/types.rs index 5289389de381..0652bbcdddec 100644 --- a/substrate/client/network/src/types.rs +++ b/substrate/client/network/src/types.rs @@ -26,6 +26,8 @@ use std::{ sync::Arc, }; +pub use libp2p::{multiaddr, Multiaddr, PeerId}; + /// The protocol name transmitted on the wire. 
#[derive(Debug, Clone)] pub enum ProtocolName { diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index dd3a8bef8a2f..43933f066edd 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -22,10 +22,10 @@ codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs index 586a15cadd68..df93788696e3 100644 --- a/substrate/client/network/statement/src/lib.rs +++ b/substrate/client/network/statement/src/lib.rs @@ -33,8 +33,7 @@ use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt} use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonReservedPeerMode, SetConfig}, - error, - multiaddr::{Multiaddr, Protocol}, + error, multiaddr, peer_store::PeerStoreProvider, service::{ traits::{NotificationEvent, NotificationService, ValidationResult}, @@ -297,19 +296,9 @@ where fn handle_sync_event(&mut self, event: SyncEvent) { match event { - SyncEvent::InitialPeers(peer_ids) => { - let addrs = peer_ids - .into_iter() - .map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into()))) - .collect(); - let result = - 
self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs); - if let Err(err) = result { - log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err); - } - }, - SyncEvent::PeerConnected(peer_id) => { - let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); + SyncEvent::PeerConnected(remote) => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::(); let result = self.network.add_peers_to_reserved_set( self.protocol_name.clone(), iter::once(addr).collect(), @@ -318,10 +307,10 @@ where log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err); } }, - SyncEvent::PeerDisconnected(peer_id) => { + SyncEvent::PeerDisconnected(remote) => { let result = self.network.remove_peers_from_reserved_set( self.protocol_name.clone(), - iter::once(peer_id).collect(), + iter::once(remote).collect(), ); if let Err(err) = result { log::error!(target: LOG_TARGET, "Failed to remove reserved peer: {err}"); diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index fdc290a2d01e..378b7c12e9b7 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -23,30 +23,30 @@ array-bytes = { workspace = true, default-features = true } async-channel = { workspace = true } async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } -fork-tree = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } prost = { workspace = true } +schnellru = { workspace = true } +smallvec = { workspace = true, default-features = true } +thiserror = { workspace = true } +tokio-stream = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } +fork-tree 
= { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -schnellru = { workspace = true } -smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { features = ["macros", "time"], workspace = true, default-features = true } -tokio-stream = { workspace = true } [dev-dependencies] mockall = { workspace = true } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 4003361525e1..cc2089d1974c 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -100,8 +100,6 @@ mod rep { pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when a peer doesn't respond in time to our messages. pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); - /// Reputation change when a peer connection failed with IO error. 
- pub const IO: Rep = Rep::new(-(1 << 10), "IO error during request"); } struct Metrics { @@ -547,14 +545,7 @@ where self.process_service_command(command), notification_event = self.notification_service.next_event() => match notification_event { Some(event) => self.process_notification_event(event), - None => { - error!( - target: LOG_TARGET, - "Terminating `SyncingEngine` because `NotificationService` has terminated.", - ); - - return; - } + None => return, }, response_event = self.pending_responses.select_next_some() => self.process_response_event(response_event), @@ -656,11 +647,7 @@ where ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.strategy.set_sync_fork_request(peers, &hash, number); }, - ToServiceCommand::EventStream(tx) => { - let _ = tx - .unbounded_send(SyncEvent::InitialPeers(self.peers.keys().cloned().collect())); - self.event_streams.push(tx); - }, + ToServiceCommand::EventStream(tx) => self.event_streams.push(tx), ToServiceCommand::RequestJustification(hash, number) => self.strategy.request_justification(&hash, number), ToServiceCommand::ClearJustificationRequests => @@ -1025,14 +1012,9 @@ where debug_assert!( false, "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", + response receiver.", ); }, - RequestFailure::Network(OutboundFailure::Io(_)) => { - self.network_service.report_peer(peer_id, rep::IO); - self.network_service - .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); - }, } }, Err(oneshot::Canceled) => { diff --git a/substrate/client/network/sync/src/strategy/state_sync.rs b/substrate/client/network/sync/src/strategy/state_sync.rs index 47d859a1b7c6..1ed1de7c8efa 100644 --- a/substrate/client/network/sync/src/strategy/state_sync.rs +++ b/substrate/client/network/sync/src/strategy/state_sync.rs @@ -19,12 +19,12 @@ //! State sync support. 
use crate::{ - schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse}, + schema::v1::{StateEntry, StateRequest, StateResponse}, LOG_TARGET, }; use codec::{Decode, Encode}; use log::debug; -use sc_client_api::{CompactProof, KeyValueStates, ProofProvider}; +use sc_client_api::{CompactProof, ProofProvider}; use sc_consensus::ImportedState; use smallvec::SmallVec; use sp_core::storage::well_known_keys; @@ -89,62 +89,22 @@ pub enum ImportResult { BadResponse, } -struct StateSyncMetadata { - last_key: SmallVec<[Vec; 2]>, +/// State sync state machine. Accumulates partial state data until it +/// is ready to be imported. +pub struct StateSync { + target_block: B::Hash, target_header: B::Header, + target_root: B::Hash, target_body: Option>, target_justifications: Option, + last_key: SmallVec<[Vec; 2]>, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, + client: Arc, imported_bytes: u64, skip_proof: bool, } -impl StateSyncMetadata { - fn target_hash(&self) -> B::Hash { - self.target_header.hash() - } - - /// Returns target block number. - fn target_number(&self) -> NumberFor { - *self.target_header.number() - } - - fn target_root(&self) -> B::Hash { - *self.target_header.state_root() - } - - fn next_request(&self) -> StateRequest { - StateRequest { - block: self.target_hash().encode(), - start: self.last_key.clone().into_vec(), - no_proof: self.skip_proof, - } - } - - fn progress(&self) -> StateSyncProgress { - let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); - let percent_done = cursor as u32 * 100 / 256; - StateSyncProgress { - percentage: percent_done, - size: self.imported_bytes, - phase: if self.complete { - StateSyncPhase::ImportingState - } else { - StateSyncPhase::DownloadingState - }, - } - } -} - -/// State sync state machine. -/// -/// Accumulates partial state data until it is ready to be imported. 
-pub struct StateSync { - metadata: StateSyncMetadata, - state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, - client: Arc, -} - impl StateSync where B: BlockT, @@ -160,92 +120,18 @@ where ) -> Self { Self { client, - metadata: StateSyncMetadata { - last_key: SmallVec::default(), - target_header, - target_body, - target_justifications, - complete: false, - imported_bytes: 0, - skip_proof, - }, + target_block: target_header.hash(), + target_root: *target_header.state_root(), + target_header, + target_body, + target_justifications, + last_key: SmallVec::default(), state: HashMap::default(), + complete: false, + imported_bytes: 0, + skip_proof, } } - - fn process_state_key_values( - &mut self, - state_root: Vec, - key_values: impl IntoIterator, Vec)>, - ) { - let is_top = state_root.is_empty(); - - let entry = self.state.entry(state_root).or_default(); - - if entry.0.len() > 0 && entry.1.len() > 1 { - // Already imported child_trie with same root. - // Warning this will not work with parallel download. - return; - } - - let mut child_storage_roots = Vec::new(); - - for (key, value) in key_values { - // Skip all child key root (will be recalculated on import) - if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { - child_storage_roots.push((value, key)); - } else { - self.metadata.imported_bytes += key.len() as u64; - entry.0.push((key, value)); - } - } - - for (root, storage_key) in child_storage_roots { - self.state.entry(root).or_default().1.push(storage_key); - } - } - - fn process_state_verified(&mut self, values: KeyValueStates) { - for values in values.0 { - self.process_state_key_values(values.state_root, values.key_values); - } - } - - fn process_state_unverified(&mut self, response: StateResponse) -> bool { - let mut complete = true; - // if the trie is a child trie and one of its parent trie is empty, - // the parent cursor stays valid. - // Empty parent trie content only happens when all the response content - // is part of a single child trie. 
- if self.metadata.last_key.len() == 2 && response.entries[0].entries.is_empty() { - // Do not remove the parent trie position. - self.metadata.last_key.pop(); - } else { - self.metadata.last_key.clear(); - } - for state in response.entries { - debug!( - target: LOG_TARGET, - "Importing state from {:?} to {:?}", - state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - ); - - if !state.complete { - if let Some(e) = state.entries.last() { - self.metadata.last_key.push(e.key.clone()); - } - complete = false; - } - - let KeyValueStateEntry { state_root, entries, complete: _ } = state; - self.process_state_key_values( - state_root, - entries.into_iter().map(|StateEntry { key, value }| (key, value)), - ); - } - complete - } } impl StateSyncProvider for StateSync @@ -259,11 +145,11 @@ where debug!(target: LOG_TARGET, "Bad state response"); return ImportResult::BadResponse } - if !self.metadata.skip_proof && response.proof.is_empty() { + if !self.skip_proof && response.proof.is_empty() { debug!(target: LOG_TARGET, "Missing proof"); return ImportResult::BadResponse } - let complete = if !self.metadata.skip_proof { + let complete = if !self.skip_proof { debug!(target: LOG_TARGET, "Importing state from {} trie nodes", response.proof.len()); let proof_size = response.proof.len() as u64; let proof = match CompactProof::decode(&mut response.proof.as_ref()) { @@ -274,9 +160,9 @@ where }, }; let (values, completed) = match self.client.verify_range_proof( - self.metadata.target_root(), + self.target_root, proof, - self.metadata.last_key.as_slice(), + self.last_key.as_slice(), ) { Err(e) => { debug!( @@ -291,25 +177,110 @@ where debug!(target: LOG_TARGET, "Imported with {} keys", values.len()); let complete = completed == 0; - if !complete && !values.update_last_key(completed, &mut self.metadata.last_key) { + if !complete && !values.update_last_key(completed, &mut self.last_key) { 
debug!(target: LOG_TARGET, "Error updating key cursor, depth: {}", completed); }; - self.process_state_verified(values); - self.metadata.imported_bytes += proof_size; + for values in values.0 { + let key_values = if values.state_root.is_empty() { + // Read child trie roots. + values + .key_values + .into_iter() + .filter(|key_value| { + if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { + self.state + .entry(key_value.1.clone()) + .or_default() + .1 + .push(key_value.0.clone()); + false + } else { + true + } + }) + .collect() + } else { + values.key_values + }; + let entry = self.state.entry(values.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported child_trie with same root. + // Warning this will not work with parallel download. + } else if entry.0.is_empty() { + for (key, _value) in key_values.iter() { + self.imported_bytes += key.len() as u64; + } + + entry.0 = key_values; + } else { + for (key, value) in key_values { + self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) + } + } + } + self.imported_bytes += proof_size; complete } else { - self.process_state_unverified(response) + let mut complete = true; + // if the trie is a child trie and one of its parent trie is empty, + // the parent cursor stays valid. + // Empty parent trie content only happens when all the response content + // is part of a single child trie. + if self.last_key.len() == 2 && response.entries[0].entries.is_empty() { + // Do not remove the parent trie position. 
+ self.last_key.pop(); + } else { + self.last_key.clear(); + } + for state in response.entries { + debug!( + target: LOG_TARGET, + "Importing state from {:?} to {:?}", + state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if !state.complete { + if let Some(e) = state.entries.last() { + self.last_key.push(e.key.clone()); + } + complete = false; + } + let is_top = state.state_root.is_empty(); + let entry = self.state.entry(state.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported child trie with same root. + } else { + let mut child_roots = Vec::new(); + for StateEntry { key, value } in state.entries { + // Skip all child key root (will be recalculated on import). + if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { + child_roots.push((value, key)); + } else { + self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) + } + } + for (root, storage_key) in child_roots { + self.state.entry(root).or_default().1.push(storage_key); + } + } + } + complete }; if complete { - self.metadata.complete = true; - let target_hash = self.metadata.target_hash(); + self.complete = true; ImportResult::Import( - target_hash, - self.metadata.target_header.clone(), - ImportedState { block: target_hash, state: std::mem::take(&mut self.state).into() }, - self.metadata.target_body.clone(), - self.metadata.target_justifications.clone(), + self.target_block, + self.target_header.clone(), + ImportedState { + block: self.target_block, + state: std::mem::take(&mut self.state).into(), + }, + self.target_body.clone(), + self.target_justifications.clone(), ) } else { ImportResult::Continue @@ -318,26 +289,40 @@ where /// Produce next state request. 
fn next_request(&self) -> StateRequest { - self.metadata.next_request() + StateRequest { + block: self.target_block.encode(), + start: self.last_key.clone().into_vec(), + no_proof: self.skip_proof, + } } /// Check if the state is complete. fn is_complete(&self) -> bool { - self.metadata.complete + self.complete } /// Returns target block number. fn target_number(&self) -> NumberFor { - self.metadata.target_number() + *self.target_header.number() } /// Returns target block hash. fn target_hash(&self) -> B::Hash { - self.metadata.target_hash() + self.target_block } /// Returns state sync estimated progress. fn progress(&self) -> StateSyncProgress { - self.metadata.progress() + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; + StateSyncProgress { + percentage: percent_done, + size: self.imported_bytes, + phase: if self.complete { + StateSyncPhase::ImportingState + } else { + StateSyncPhase::DownloadingState + }, + } } } diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs index a72a2f7c1ffe..5745a34378df 100644 --- a/substrate/client/network/sync/src/types.rs +++ b/substrate/client/network/sync/src/types.rs @@ -127,10 +127,6 @@ where /// Syncing-related events that other protocols can subscribe to. pub enum SyncEvent { - /// All connected peers that the syncing implementation is tracking. - /// Always sent as the first message to the stream. - InitialPeers(Vec), - /// Peer that the syncing implementation is tracking connected. 
PeerConnected(PeerId), diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 783d47f21fa7..ebece1762f29 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,6 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +tokio = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } @@ -28,11 +29,11 @@ sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } -sc-network-light = { workspace = true, default-features = true } -sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } -sc-service = { workspace = true } sc-utils = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -40,4 +41,3 @@ sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 3cdf211e07f6..825481314c67 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -91,7 +91,7 @@ use sp_runtime::{ 
traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; -use substrate_test_runtime_client::Sr25519Keyring; +use substrate_test_runtime_client::AccountKeyring; pub use substrate_test_runtime_client::{ runtime::{Block, ExtrinsicBuilder, Hash, Header, Transfer}, TestClient, TestClientBuilder, TestClientBuilderExt, @@ -475,8 +475,8 @@ where BlockOrigin::File, |mut builder| { let transfer = Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Alice.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Alice.into(), amount: 1, nonce, }; diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index ef9ea1c46197..2ffd6f5f4660 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -26,5 +26,5 @@ sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index 49f429a04ee2..2b5297fe0e13 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -35,8 +35,7 @@ use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonReservedPeerMode, ProtocolId, SetConfig}, - error, - multiaddr::{Multiaddr, Protocol}, + error, multiaddr, peer_store::PeerStoreProvider, service::{ traits::{NotificationEvent, NotificationService, ValidationResult}, @@ -378,19 +377,9 @@ where fn 
handle_sync_event(&mut self, event: SyncEvent) { match event { - SyncEvent::InitialPeers(peer_ids) => { - let addrs = peer_ids - .into_iter() - .map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into()))) - .collect(); - let result = - self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs); - if let Err(err) = result { - log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err); - } - }, - SyncEvent::PeerConnected(peer_id) => { - let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); + SyncEvent::PeerConnected(remote) => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::(); let result = self.network.add_peers_to_reserved_set( self.protocol_name.clone(), iter::once(addr).collect(), @@ -399,10 +388,10 @@ where log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err); } }, - SyncEvent::PeerDisconnected(peer_id) => { + SyncEvent::PeerDisconnected(remote) => { let result = self.network.remove_peers_from_reserved_set( self.protocol_name.clone(), - iter::once(peer_id).collect(), + iter::once(remote).collect(), ); if let Err(err) = result { log::error!(target: LOG_TARGET, "Remove reserved peer failed: {}", err); @@ -491,7 +480,7 @@ where continue } - let (hashes, to_send): (Vec<_>, Transactions<_>) = transactions + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions .iter() .filter(|(hash, _)| peer.known_transactions.insert(hash.clone())) .cloned() diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index 67814f135d39..655f104111e4 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -11,10 +11,8 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = { workspace = true, default-features = true } -bytes = { version = "1.4.0", default-features = false } ed25519-dalek = { workspace = true, default-features = true } libp2p-identity = { features = ["ed25519", 
"peerid", "rand"], workspace = true } -libp2p-kad = { version = "0.46.2", default-features = false } litep2p = { workspace = true } log = { workspace = true, default-features = true } multiaddr = { workspace = true } diff --git a/substrate/client/network/types/src/kad.rs b/substrate/client/network/types/src/kad.rs deleted file mode 100644 index 72028d356dc7..000000000000 --- a/substrate/client/network/types/src/kad.rs +++ /dev/null @@ -1,185 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::{multihash::Multihash, PeerId}; -use bytes::Bytes; -use libp2p_kad::RecordKey as Libp2pKey; -use litep2p::protocol::libp2p::kademlia::{Record as Litep2pRecord, RecordKey as Litep2pKey}; -use std::{error::Error, fmt, time::Instant}; - -/// The (opaque) key of a record. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct Key(Bytes); - -impl Key { - /// Creates a new key from the bytes of the input. - pub fn new>(key: &K) -> Self { - Key(Bytes::copy_from_slice(key.as_ref())) - } - - /// Copies the bytes of the key into a new vector. - pub fn to_vec(&self) -> Vec { - self.0.to_vec() - } -} - -impl AsRef<[u8]> for Key { - fn as_ref(&self) -> &[u8] { - &self.0[..] 
- } -} - -impl From> for Key { - fn from(v: Vec) -> Key { - Key(Bytes::from(v)) - } -} - -impl From for Key { - fn from(m: Multihash) -> Key { - Key::from(m.to_bytes()) - } -} - -impl From for Key { - fn from(key: Litep2pKey) -> Self { - Self::from(key.to_vec()) - } -} - -impl From for Litep2pKey { - fn from(key: Key) -> Self { - Self::from(key.to_vec()) - } -} - -impl From for Key { - fn from(key: Libp2pKey) -> Self { - Self::from(key.to_vec()) - } -} - -impl From for Libp2pKey { - fn from(key: Key) -> Self { - Self::from(key.to_vec()) - } -} - -/// A record stored in the DHT. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Record { - /// Key of the record. - pub key: Key, - /// Value of the record. - pub value: Vec, - /// The (original) publisher of the record. - pub publisher: Option, - /// The expiration time as measured by a local, monotonic clock. - pub expires: Option, -} - -impl Record { - /// Creates a new record for insertion into the DHT. - pub fn new(key: Key, value: Vec) -> Self { - Record { key, value, publisher: None, expires: None } - } - - /// Checks whether the record is expired w.r.t. the given `Instant`. 
- pub fn is_expired(&self, now: Instant) -> bool { - self.expires.map_or(false, |t| now >= t) - } -} - -impl From for Record { - fn from(out: libp2p_kad::Record) -> Self { - let vec: Vec = out.key.to_vec(); - let key: Key = vec.into(); - let publisher = out.publisher.map(Into::into); - Record { key, value: out.value, publisher, expires: out.expires } - } -} - -impl From for Litep2pRecord { - fn from(val: Record) -> Self { - let vec: Vec = val.key.to_vec(); - let key: Litep2pKey = vec.into(); - let publisher = val.publisher.map(Into::into); - Litep2pRecord { key, value: val.value, publisher, expires: val.expires } - } -} - -impl From for libp2p_kad::Record { - fn from(a: Record) -> libp2p_kad::Record { - let peer = a.publisher.map(Into::into); - libp2p_kad::Record { - key: a.key.to_vec().into(), - value: a.value, - publisher: peer, - expires: a.expires, - } - } -} - -/// A record either received by the given peer or retrieved from the local -/// record store. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct PeerRecord { - /// The peer from whom the record was received. `None` if the record was - /// retrieved from local storage. - pub peer: Option, - pub record: Record, -} - -impl From for PeerRecord { - fn from(out: libp2p_kad::PeerRecord) -> Self { - let peer = out.peer.map(Into::into); - let record = out.record.into(); - PeerRecord { peer, record } - } -} - -/// An error during signing of a message. -#[derive(Debug)] -pub struct SigningError { - msg: String, - source: Option>, -} - -/// An error during encoding of key material. 
-#[allow(dead_code)] -impl SigningError { - pub(crate) fn new(msg: S) -> Self { - Self { msg: msg.to_string(), source: None } - } - - pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { - Self { source: Some(Box::new(source)), ..self } - } -} - -impl fmt::Display for SigningError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Key signing error: {}", self.msg) - } -} - -impl Error for SigningError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - self.source.as_ref().map(|s| &**s as &dyn Error) - } -} diff --git a/substrate/client/network/types/src/lib.rs b/substrate/client/network/types/src/lib.rs index 093d81533f60..5684e38ab2e8 100644 --- a/substrate/client/network/types/src/lib.rs +++ b/substrate/client/network/types/src/lib.rs @@ -17,8 +17,8 @@ // along with this program. If not, see . pub mod ed25519; -pub mod kad; pub mod multiaddr; pub mod multihash; + mod peer_id; pub use peer_id::PeerId; diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index bfdb29cc4c35..71b40211e126 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -26,12 +26,13 @@ http-body-util = { workspace = true } hyper = { features = ["http1", "http2"], workspace = true, default-features = true } hyper-rustls = { workspace = true } hyper-util = { features = ["client-legacy", "http1", "http2"], workspace = true } -log = { workspace = true, default-features = true } num_cpus = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } rustls = { workspace = true } +threadpool = { workspace = true } +tracing = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } @@ 
-40,15 +41,15 @@ sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-externalities = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -threadpool = { workspace = true } -tracing = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } [dev-dependencies] async-trait = { workspace = true } +tokio = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-db = { default-features = true, workspace = true } sc-transaction-pool = { workspace = true, default-features = true } @@ -56,7 +57,6 @@ sc-transaction-pool-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index a62278988f12..2856300cf802 100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -44,7 +44,7 @@ impl MetricsLink { } /// The reason why proposing a block ended. 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq)] pub enum EndProposingReason { NoMoreTransactions, HitDeadline, diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index e7bb723d8839..3263285aa2b1 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -17,15 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } -jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-mixnet = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -thiserror = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index 4234ff3196ef..31e4042d81f2 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -144,56 +144,11 @@ where local_addrs.push(local_addr); let cfg = cfg.clone(); - let RpcSettings { - batch_config, - max_connections, - max_payload_in_mb, - max_payload_out_mb, - max_buffer_capacity_per_connection, - 
max_subscriptions_per_connection, - rpc_methods, - rate_limit_trust_proxy_headers, - rate_limit_whitelisted_ips, - host_filter, - cors, - rate_limit, - } = listener.rpc_settings(); - - let http_middleware = tower::ServiceBuilder::new() - .option_layer(host_filter) - // Proxy `GET /health, /health/readiness` requests to the internal - // `system_health` method. - .layer(NodeHealthProxyLayer::default()) - .layer(cors); - - let mut builder = jsonrpsee::server::Server::builder() - .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) - .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) - .max_connections(max_connections) - .max_subscriptions_per_connection(max_subscriptions_per_connection) - .enable_ws_ping( - PingConfig::new() - .ping_interval(Duration::from_secs(30)) - .inactive_limit(Duration::from_secs(60)) - .max_failures(3), - ) - .set_http_middleware(http_middleware) - .set_message_buffer_capacity(max_buffer_capacity_per_connection) - .set_batch_request_config(batch_config) - .custom_tokio_runtime(cfg.tokio_handle.clone()); - - if let Some(provider) = id_provider.clone() { - builder = builder.set_id_provider(provider); - } else { - builder = builder.set_id_provider(RandomStringIdProvider::new(16)); - }; - - let service_builder = builder.to_service_builder(); - let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); + let mut id_provider2 = id_provider.clone(); tokio_handle.spawn(async move { loop { - let (sock, remote_addr) = tokio::select! { + let (sock, remote_addr, rpc_cfg) = tokio::select! 
{ res = listener.accept() => { match res { Ok(s) => s, @@ -206,10 +161,57 @@ where _ = cfg.stop_handle.clone().shutdown() => break, }; + let RpcSettings { + batch_config, + max_connections, + max_payload_in_mb, + max_payload_out_mb, + max_buffer_capacity_per_connection, + max_subscriptions_per_connection, + rpc_methods, + rate_limit_trust_proxy_headers, + rate_limit_whitelisted_ips, + host_filter, + cors, + rate_limit, + } = rpc_cfg; + + let http_middleware = tower::ServiceBuilder::new() + .option_layer(host_filter) + // Proxy `GET /health, /health/readiness` requests to the internal + // `system_health` method. + .layer(NodeHealthProxyLayer::default()) + .layer(cors); + + let mut builder = jsonrpsee::server::Server::builder() + .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) + .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) + .max_connections(max_connections) + .max_subscriptions_per_connection(max_subscriptions_per_connection) + .enable_ws_ping( + PingConfig::new() + .ping_interval(Duration::from_secs(30)) + .inactive_limit(Duration::from_secs(60)) + .max_failures(3), + ) + .set_http_middleware(http_middleware) + .set_message_buffer_capacity(max_buffer_capacity_per_connection) + .set_batch_request_config(batch_config) + .custom_tokio_runtime(cfg.tokio_handle.clone()) + .set_id_provider(RandomStringIdProvider::new(16)); + + if let Some(provider) = id_provider2.take() { + builder = builder.set_id_provider(provider); + } else { + builder = builder.set_id_provider(RandomStringIdProvider::new(16)); + }; + + let service_builder = builder.to_service_builder(); + let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); + let ip = remote_addr.ip(); let cfg2 = cfg.clone(); let service_builder2 = service_builder.clone(); - let rate_limit_whitelisted_ips2 = rate_limit_whitelisted_ips.clone(); let svc = tower::service_fn(move |mut req: http::Request| { @@ -222,14 +224,14 @@ where let proxy_ip = if rate_limit_trust_proxy_headers { 
get_proxy_ip(&req) } else { None }; - let rate_limit_cfg = if rate_limit_whitelisted_ips2 + let rate_limit_cfg = if rate_limit_whitelisted_ips .iter() .any(|ips| ips.contains(proxy_ip.unwrap_or(ip))) { log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is trusted, disabling rate-limit", proxy_ip); None } else { - if !rate_limit_whitelisted_ips2.is_empty() { + if !rate_limit_whitelisted_ips.is_empty() { log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is not trusted, rate-limit enabled", proxy_ip); } rate_limit diff --git a/substrate/client/rpc-servers/src/utils.rs b/substrate/client/rpc-servers/src/utils.rs index b76cfced3401..d9b2db7af133 100644 --- a/substrate/client/rpc-servers/src/utils.rs +++ b/substrate/client/rpc-servers/src/utils.rs @@ -176,30 +176,31 @@ pub(crate) struct Listener { impl Listener { /// Accepts a new connection. - pub(crate) async fn accept(&mut self) -> std::io::Result<(tokio::net::TcpStream, SocketAddr)> { + pub(crate) async fn accept( + &mut self, + ) -> std::io::Result<(tokio::net::TcpStream, SocketAddr, RpcSettings)> { let (sock, remote_addr) = self.listener.accept().await?; - Ok((sock, remote_addr)) + Ok((sock, remote_addr, self.cfg.clone())) } /// Returns the local address the listener is bound to. pub fn local_addr(&self) -> SocketAddr { self.local_addr } - - pub fn rpc_settings(&self) -> RpcSettings { - self.cfg.clone() - } } pub(crate) fn host_filtering(enabled: bool, addr: SocketAddr) -> Option { if enabled { // NOTE: The listening addresses are whitelisted by default. 
- let hosts = [ - format!("localhost:{}", addr.port()), - format!("127.0.0.1:{}", addr.port()), - format!("[::1]:{}", addr.port()), - ]; + let mut hosts = Vec::new(); + + if addr.is_ipv4() { + hosts.push(format!("localhost:{}", addr.port())); + hosts.push(format!("127.0.0.1:{}", addr.port())); + } else { + hosts.push(format!("[::1]:{}", addr.port())); + } Some(HostFilterLayer::new(hosts).expect("Valid hosts; qed")) } else { diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index ebe7e7eca7b4..58dd8b830beb 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -20,45 +20,43 @@ jsonrpsee = { workspace = true, features = ["client-core", "macros", "server-cor # Internal chain structures for "chain_spec". sc-chain-spec = { workspace = true, default-features = true } # Pool for submitting extrinsics required by "transaction" -array-bytes = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } -futures = { workspace = true } -futures-util = { workspace = true } -hex = { workspace = true, default-features = true } -itertools = { workspace = true } -log = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -schnellru = { workspace = true } -serde = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-rpc = 
{ workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = { features = ["sync"], workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } tokio-stream = { features = ["sync"], workspace = true } +tokio = { features = ["sync"], workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } +futures-util = { workspace = true } +rand = { workspace = true, default-features = true } +schnellru = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -async-trait = { workspace = true } jsonrpsee = { workspace = true, features = ["server", "ws-client"] } -pretty_assertions = { workspace = true } -sc-block-builder = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } -sc-service = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-utils = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } 
sp-maybe-compressed-blob = { workspace = true, default-features = true } -substrate-test-runtime = { workspace = true } -substrate-test-runtime-client = { workspace = true } -substrate-test-runtime-transaction-pool = { workspace = true } -tokio = { features = ["macros"], workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index a205d0502c93..b19738304000 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -19,9 +19,7 @@ //! API trait of the archive methods. use crate::{ - common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, - }, + common::events::{ArchiveStorageResult, PaginatedStorageQuery}, MethodResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -99,32 +97,11 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[subscription( - name = "archive_unstable_storage" => "archive_unstable_storageEvent", - unsubscribe = "archive_unstable_stopStorage", - item = ArchiveStorageEvent, - )] + #[method(name = "archive_unstable_storage", blocking)] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ); - - /// Returns the storage difference between two blocks. - /// - /// # Unstable - /// - /// This method is unstable and can change in minor or patch releases. 
- #[subscription( - name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent", - unsubscribe = "archive_unstable_storageDiff_stopStorageDiff", - item = ArchiveStorageDiffEvent, - )] - fn archive_unstable_storage_diff( - &self, - hash: Hash, - items: Vec>, - previous_hash: Option, - ); + ) -> RpcResult; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 62e44a016241..dd6c566a76ed 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -19,29 +19,17 @@ //! API implementation for `archive`. use crate::{ - archive::{ - archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, - }, - common::{ - events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, - }, - storage::{QueryResult, StorageSubscriptionClient}, - }, - hex_string, MethodResult, SubscriptionTaskExecutor, + archive::{error::Error as ArchiveError, ArchiveApiServer}, + common::events::{ArchiveStorageResult, PaginatedStorageQuery}, + hex_string, MethodResult, }; use codec::Encode; -use futures::FutureExt; -use jsonrpsee::{ - core::{async_trait, RpcResult}, - PendingSubscriptionSink, -}; +use jsonrpsee::core::{async_trait, RpcResult}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, StorageProvider, }; -use sc_rpc::utils::Subscription; use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, @@ -53,15 +41,37 @@ use sp_runtime::{ }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; -use tokio::sync::mpsc; +use super::archive_storage::ArchiveStorage; + +/// The configuration of [`Archive`]. 
+pub struct ArchiveConfig { + /// The maximum number of items the `archive_storage` can return for a descendant query before + /// pagination is required. + pub max_descendant_responses: usize, + /// The maximum number of queried items allowed for the `archive_storage` at a time. + pub max_queried_items: usize, +} -pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; +/// The maximum number of items the `archive_storage` can return for a descendant query before +/// pagination is required. +/// +/// Note: this is identical to the `chainHead` value. +const MAX_DESCENDANT_RESPONSES: usize = 5; -/// The buffer capacity for each storage query. +/// The maximum number of queried items allowed for the `archive_storage` at a time. /// -/// This is small because the underlying JSON-RPC server has -/// its down buffer capacity per connection as well. -const STORAGE_QUERY_BUF: usize = 16; +/// Note: A queried item can also be a descendant query which can return up to +/// `MAX_DESCENDANT_RESPONSES`. +const MAX_QUERIED_ITEMS: usize = 8; + +impl Default for ArchiveConfig { + fn default() -> Self { + Self { + max_descendant_responses: MAX_DESCENDANT_RESPONSES, + max_queried_items: MAX_QUERIED_ITEMS, + } + } +} /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { @@ -69,10 +79,13 @@ pub struct Archive, Block: BlockT, Client> { client: Arc, /// Backend of the chain. backend: Arc, - /// Executor to spawn subscriptions. - executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, + /// The maximum number of items the `archive_storage` can return for a descendant query before + /// pagination is required. + storage_max_descendant_responses: usize, + /// The maximum number of queried items allowed for the `archive_storage` at a time. + storage_max_queried_items: usize, /// Phantom member to pin the block type. 
_phantom: PhantomData, } @@ -83,10 +96,17 @@ impl, Block: BlockT, Client> Archive { client: Arc, backend: Arc, genesis_hash: GenesisHash, - executor: SubscriptionTaskExecutor, + config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { client, backend, executor, genesis_hash, _phantom: PhantomData } + Self { + client, + backend, + genesis_hash, + storage_max_descendant_responses: config.max_descendant_responses, + storage_max_queried_items: config.max_queried_items, + _phantom: PhantomData, + } } } @@ -216,157 +236,46 @@ where fn archive_unstable_storage( &self, - pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) { - let mut storage_client = - StorageSubscriptionClient::::new(self.client.clone()); - - let fut = async move { - let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - - let items = match items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - Ok(StorageQuery { key, query_type: query.query_type }) - }) - .collect::, ArchiveError>>() - { - Ok(items) => items, - Err(error) => { - let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); - return - }, - }; - - let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); - let child_trie = match child_trie { - Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), - Err(error) => { - let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); - return - }, - }; - - let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); - let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); - - // We don't care about the return value of this join: - // - process_events might encounter an error (if the client disconnected) - // - storage_fut might encounter an error while processing a trie queries and - // the error is propagated via the sink. 
- let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) - .await; - }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - } - - fn archive_unstable_storage_diff( - &self, - pending: PendingSubscriptionSink, - hash: Block::Hash, - items: Vec>, - previous_hash: Option, - ) { - let storage_client = ArchiveStorageDiff::new(self.client.clone()); - let client = self.client.clone(); - - log::trace!(target: LOG_TARGET, "Storage diff subscription started"); - - let fut = async move { - let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - - let previous_hash = if let Some(previous_hash) = previous_hash { - previous_hash - } else { - let Ok(Some(current_header)) = client.header(hash) else { - let message = format!("Block header is not present: {hash}"); - let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; - return - }; - *current_header.parent_hash() - }; - - let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); - let storage_fut = - storage_client.handle_trie_queries(hash, items, previous_hash, tx.clone()); - - // We don't care about the return value of this join: - // - process_events might encounter an error (if the client disconnected) - // - storage_fut might encounter an error while processing a trie queries and - // the error is propagated via the sink. - let _ = - futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) - .await; - }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - } -} - -/// Sends all the events of the storage_diff method to the sink. -async fn process_storage_diff_events( - rx: &mut mpsc::Receiver, - sink: &mut Subscription, -) { - loop { - tokio::select! 
{ - _ = sink.closed() => { - return - }, - - maybe_event = rx.recv() => { - let Some(event) = maybe_event else { - break; - }; - - if event.is_done() { - log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); - } else if event.is_err() { - log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); - } - - if sink.send(&event).await.is_err() { - return + ) -> RpcResult { + let items = items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + let pagination_start_key = query + .pagination_start_key + .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) + .transpose()?; + + // Paginated start key is only supported + if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { + return Err(ArchiveError::InvalidParam( + "Pagination start key is only supported for descendants queries" + .to_string(), + )) } - } - } - } -} -/// Sends all the events of the storage method to the sink. -async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { - loop { - tokio::select! { - _ = sink.closed() => { - break - } - - maybe_storage = rx.recv() => { - let Some(event) = maybe_storage else { - break; - }; + Ok(PaginatedStorageQuery { + key, + query_type: query.query_type, + pagination_start_key, + }) + }) + .collect::, ArchiveError>>()?; - match event { - Ok(None) => continue, + let child_trie = child_trie + .map(|child_trie| parse_hex_param(child_trie)) + .transpose()? 
+ .map(ChildInfo::new_default_from_vec); - Ok(Some(event)) => - if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { - return - }, + let storage_client = ArchiveStorage::new( + self.client.clone(), + self.storage_max_descendant_responses, + self.storage_max_queried_items, + ); - Err(error) => { - let _ = sink.send(&ArchiveStorageEvent::err(error)).await; - return - } - } - } - } + Ok(storage_client.handle_query(hash, items, child_trie)) } - - let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 390db765a48f..26e7c299de41 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -18,832 +18,112 @@ //! Implementation of the `archive_storage` method. -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; +use std::sync::Arc; -use itertools::Itertools; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; -use super::error::Error as ArchiveError; -use crate::{ - archive::archive::LOG_TARGET, - common::{ - events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, - }, - storage::Storage, - }, +use crate::common::{ + events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType}, + storage::{IterQueryType, QueryIter, Storage}, }; -use tokio::sync::mpsc; - -/// Parse hex-encoded string parameter as raw bytes. -/// -/// If the parsing fails, returns an error propagated to the RPC method. -pub fn parse_hex_param(param: String) -> Result, ArchiveError> { - // Methods can accept empty parameters. 
- if param.is_empty() { - return Ok(Default::default()) - } - - array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) -} - -#[derive(Debug, PartialEq, Clone)] -pub struct DiffDetails { - key: StorageKey, - return_type: ArchiveStorageDiffType, - child_trie_key: Option, - child_trie_key_string: Option, -} - -/// The type of storage query. -#[derive(Debug, PartialEq, Clone, Copy)] -enum FetchStorageType { - /// Only fetch the value. - Value, - /// Only fetch the hash. - Hash, - /// Fetch both the value and the hash. - Both, -} -/// The return value of the `fetch_storage` method. -#[derive(Debug, PartialEq, Clone)] -enum FetchedStorage { - /// Storage value under a key. - Value(StorageResult), - /// Storage hash under a key. - Hash(StorageResult), - /// Both storage value and hash under a key. - Both { value: StorageResult, hash: StorageResult }, -} - -pub struct ArchiveStorageDiff { +/// Generates the events of the `archive_storage` method. +pub struct ArchiveStorage { + /// Storage client. client: Storage, + /// The maximum number of responses the API can return for a descendant query at a time. + storage_max_descendant_responses: usize, + /// The maximum number of queried items allowed for the `archive_storage` at a time. + storage_max_queried_items: usize, } -impl ArchiveStorageDiff { - pub fn new(client: Arc) -> Self { - Self { client: Storage::new(client) } +impl ArchiveStorage { + /// Constructs a new [`ArchiveStorage`]. + pub fn new( + client: Arc, + storage_max_descendant_responses: usize, + storage_max_queried_items: usize, + ) -> Self { + Self { + client: Storage::new(client), + storage_max_descendant_responses, + storage_max_queried_items, + } } } -impl ArchiveStorageDiff +impl ArchiveStorage where Block: BlockT + 'static, BE: Backend + 'static, - Client: StorageProvider + Send + Sync + 'static, + Client: StorageProvider + 'static, { - /// Fetch the storage from the given key. 
- fn fetch_storage( + /// Generate the response of the `archive_storage` method. + pub fn handle_query( &self, hash: Block::Hash, - key: StorageKey, - maybe_child_trie: Option, - ty: FetchStorageType, - ) -> Result, String> { - match ty { - FetchStorageType::Value => { - let result = self.client.query_value(hash, &key, maybe_child_trie.as_ref())?; - - Ok(result.map(FetchedStorage::Value)) - }, - - FetchStorageType::Hash => { - let result = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())?; - - Ok(result.map(FetchedStorage::Hash)) - }, - - FetchStorageType::Both => { - let Some(value) = self.client.query_value(hash, &key, maybe_child_trie.as_ref())? - else { - return Ok(None); - }; - - let Some(hash) = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())? - else { - return Ok(None); - }; - - Ok(Some(FetchedStorage::Both { value, hash })) - }, - } - } - - /// Check if the key belongs to the provided query items. - /// - /// A key belongs to the query items when: - /// - the provided key is a prefix of the key in the query items. - /// - the query items are empty. - /// - /// Returns an optional `FetchStorageType` based on the query items. - /// If the key does not belong to the query items, returns `None`. - fn belongs_to_query(key: &StorageKey, items: &[DiffDetails]) -> Option { - // User has requested all keys, by default this fallbacks to fetching the value. 
- if items.is_empty() { - return Some(FetchStorageType::Value) - } - - let mut value = false; - let mut hash = false; + mut items: Vec>, + child_key: Option, + ) -> ArchiveStorageResult { + let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); + items.truncate(self.storage_max_queried_items); + let mut storage_results = Vec::with_capacity(items.len()); for item in items { - if key.as_ref().starts_with(&item.key.as_ref()) { - match item.return_type { - ArchiveStorageDiffType::Value => value = true, - ArchiveStorageDiffType::Hash => hash = true, - } - } - } - - match (value, hash) { - (true, true) => Some(FetchStorageType::Both), - (true, false) => Some(FetchStorageType::Value), - (false, true) => Some(FetchStorageType::Hash), - (false, false) => None, - } - } - - /// Send the provided result to the `tx` sender. - /// - /// Returns `false` if the sender has been closed. - fn send_result( - tx: &mpsc::Sender, - result: FetchedStorage, - operation_type: ArchiveStorageDiffOperationType, - child_trie_key: Option, - ) -> bool { - let items = match result { - FetchedStorage::Value(storage_result) | FetchedStorage::Hash(storage_result) => - vec![storage_result], - FetchedStorage::Both { value, hash } => vec![value, hash], - }; - - for item in items { - let res = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: item.key, - result: item.result, - operation_type, - child_trie_key: child_trie_key.clone(), - }); - if tx.blocking_send(res).is_err() { - return false - } - } - - true - } - - fn handle_trie_queries_inner( - &self, - hash: Block::Hash, - previous_hash: Block::Hash, - items: Vec, - tx: &mpsc::Sender, - ) -> Result<(), String> { - // Parse the child trie key as `ChildInfo` and `String`. 
- let maybe_child_trie = items.first().and_then(|item| item.child_trie_key.clone()); - let maybe_child_trie_str = - items.first().and_then(|item| item.child_trie_key_string.clone()); - - // Iterator over the current block and previous block - // at the same time to compare the keys. This approach effectively - // leverages backpressure to avoid memory consumption. - let keys_iter = self.client.raw_keys_iter(hash, maybe_child_trie.clone())?; - let previous_keys_iter = - self.client.raw_keys_iter(previous_hash, maybe_child_trie.clone())?; - - let mut diff_iter = lexicographic_diff(keys_iter, previous_keys_iter); - - while let Some(item) = diff_iter.next() { - let (operation_type, key) = match item { - Diff::Added(key) => (ArchiveStorageDiffOperationType::Added, key), - Diff::Deleted(key) => (ArchiveStorageDiffOperationType::Deleted, key), - Diff::Equal(key) => (ArchiveStorageDiffOperationType::Modified, key), - }; - - let Some(fetch_type) = Self::belongs_to_query(&key, &items) else { - // The key does not belong the the query items. 
- continue; - }; - - let maybe_result = match operation_type { - ArchiveStorageDiffOperationType::Added => - self.fetch_storage(hash, key.clone(), maybe_child_trie.clone(), fetch_type)?, - ArchiveStorageDiffOperationType::Deleted => self.fetch_storage( - previous_hash, - key.clone(), - maybe_child_trie.clone(), - fetch_type, - )?, - ArchiveStorageDiffOperationType::Modified => { - let Some(storage_result) = self.fetch_storage( + match item.query_type { + StorageQueryType::Value => { + match self.client.query_value(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => return ArchiveStorageResult::err(error), + } + }, + StorageQueryType::Hash => + match self.client.query_hash(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => return ArchiveStorageResult::err(error), + }, + StorageQueryType::ClosestDescendantMerkleValue => + match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { + Ok(Some(value)) => storage_results.push(value), + Ok(None) => continue, + Err(error) => return ArchiveStorageResult::err(error), + }, + StorageQueryType::DescendantsValues => { + match self.client.query_iter_pagination( + QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: item.pagination_start_key, + }, hash, - key.clone(), - maybe_child_trie.clone(), - fetch_type, - )? - else { - continue - }; - - let Some(previous_storage_result) = self.fetch_storage( - previous_hash, - key.clone(), - maybe_child_trie.clone(), - fetch_type, - )? - else { - continue - }; - - // For modified records we need to check the actual storage values. 
- if storage_result == previous_storage_result { - continue + child_key.as_ref(), + self.storage_max_descendant_responses, + ) { + Ok((results, _)) => storage_results.extend(results), + Err(error) => return ArchiveStorageResult::err(error), } - - Some(storage_result) }, - }; - - if let Some(storage_result) = maybe_result { - if !Self::send_result( - &tx, - storage_result, - operation_type, - maybe_child_trie_str.clone(), - ) { - return Ok(()) - } - } - } - - Ok(()) - } - - /// This method will iterate over the keys of the main trie or a child trie and fetch the - /// given keys. The fetched keys will be sent to the provided `tx` sender to leverage - /// the backpressure mechanism. - pub async fn handle_trie_queries( - &self, - hash: Block::Hash, - items: Vec>, - previous_hash: Block::Hash, - tx: mpsc::Sender, - ) -> Result<(), tokio::task::JoinError> { - let this = ArchiveStorageDiff { client: self.client.clone() }; - - tokio::task::spawn_blocking(move || { - // Deduplicate the items. - let mut trie_items = match deduplicate_storage_diff_items(items) { - Ok(items) => items, - Err(error) => { - let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error.to_string())); - return + StorageQueryType::DescendantsHashes => { + match self.client.query_iter_pagination( + QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: item.pagination_start_key, + }, + hash, + child_key.as_ref(), + self.storage_max_descendant_responses, + ) { + Ok((results, _)) => storage_results.extend(results), + Err(error) => return ArchiveStorageResult::err(error), + } }, }; - // Default to using the main storage trie if no items are provided. 
- if trie_items.is_empty() { - trie_items.push(Vec::new()); - } - log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); - - for items in trie_items { - log::trace!( - target: LOG_TARGET, - "handle_trie_queries: hash={:?}, previous_hash={:?}, items={:?}", - hash, - previous_hash, - items - ); - - let result = this.handle_trie_queries_inner(hash, previous_hash, items, &tx); - - if let Err(error) = result { - log::trace!( - target: LOG_TARGET, - "handle_trie_queries: sending error={:?}", - error, - ); - - let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error)); - - return - } else { - log::trace!( - target: LOG_TARGET, - "handle_trie_queries: sending storage diff done", - ); - } - } - - let _ = tx.blocking_send(ArchiveStorageDiffEvent::StorageDiffDone); - }) - .await?; - - Ok(()) - } -} - -/// The result of the `lexicographic_diff` method. -#[derive(Debug, PartialEq)] -enum Diff { - Added(T), - Deleted(T), - Equal(T), -} - -/// Compare two iterators lexicographically and return the differences. -fn lexicographic_diff( - mut left: LeftIter, - mut right: RightIter, -) -> impl Iterator> -where - T: Ord, - LeftIter: Iterator, - RightIter: Iterator, -{ - let mut a = left.next(); - let mut b = right.next(); - - core::iter::from_fn(move || match (a.take(), b.take()) { - (Some(a_value), Some(b_value)) => - if a_value < b_value { - b = Some(b_value); - a = left.next(); - - Some(Diff::Added(a_value)) - } else if a_value > b_value { - a = Some(a_value); - b = right.next(); - - Some(Diff::Deleted(b_value)) - } else { - a = left.next(); - b = right.next(); - - Some(Diff::Equal(a_value)) - }, - (Some(a_value), None) => { - a = left.next(); - Some(Diff::Added(a_value)) - }, - (None, Some(b_value)) => { - b = right.next(); - Some(Diff::Deleted(b_value)) - }, - (None, None) => None, - }) -} - -/// Deduplicate the provided items and return a list of `DiffDetails`. -/// -/// Each list corresponds to a single child trie or the main trie. 
-fn deduplicate_storage_diff_items( - items: Vec>, -) -> Result>, ArchiveError> { - let mut deduplicated: HashMap, Vec> = HashMap::new(); - - for diff_item in items { - // Ensure the provided hex keys are valid before deduplication. - let key = StorageKey(parse_hex_param(diff_item.key)?); - let child_trie_key_string = diff_item.child_trie_key.clone(); - let child_trie_key = diff_item - .child_trie_key - .map(|child_trie_key| parse_hex_param(child_trie_key)) - .transpose()? - .map(ChildInfo::new_default_from_vec); - - let diff_item = DiffDetails { - key, - return_type: diff_item.return_type, - child_trie_key: child_trie_key.clone(), - child_trie_key_string, - }; - - match deduplicated.entry(child_trie_key.clone()) { - Entry::Occupied(mut entry) => { - let mut should_insert = true; - - for existing in entry.get() { - // This points to a different return type. - if existing.return_type != diff_item.return_type { - continue - } - // Keys and return types are identical. - if existing.key == diff_item.key { - should_insert = false; - break - } - - // The following two conditions ensure that we keep the shortest key. - - // The current key is a longer prefix of the existing key. - if diff_item.key.as_ref().starts_with(&existing.key.as_ref()) { - should_insert = false; - break - } - - // The existing key is a longer prefix of the current key. - // We need to keep the current key and remove the existing one. 
- if existing.key.as_ref().starts_with(&diff_item.key.as_ref()) { - let to_remove = existing.clone(); - entry.get_mut().retain(|item| item != &to_remove); - break; - } - } - - if should_insert { - entry.get_mut().push(diff_item); - } - }, - Entry::Vacant(entry) => { - entry.insert(vec![diff_item]); - }, } - } - - Ok(deduplicated - .into_iter() - .sorted_by_key(|(child_trie_key, _)| child_trie_key.clone()) - .map(|(_, values)| values) - .collect()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn dedup_empty() { - let items = vec![]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert!(result.is_empty()); - } - - #[test] - fn dedup_single() { - let items = vec![ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 1); - - let expected = DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }; - assert_eq!(result[0][0], expected); - } - - #[test] - fn dedup_with_different_keys() { - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x02".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 2); - - let expected = vec![ - DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }, - DiffDetails { - key: StorageKey(vec![2]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }, - ]; - assert_eq!(result[0], expected); - } - - #[test] - fn 
dedup_with_same_keys() { - // Identical keys. - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 1); - - let expected = vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }]; - assert_eq!(result[0], expected); - } - - #[test] - fn dedup_with_same_prefix() { - // Identical keys. - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x01ff".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 1); - - let expected = vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }]; - assert_eq!(result[0], expected); - } - - #[test] - fn dedup_with_different_return_types() { - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: None, - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 2); - - let expected = vec![ - DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }, - DiffDetails { - key: 
StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: None, - child_trie_key_string: None, - }, - ]; - assert_eq!(result[0], expected); - } - - #[test] - fn dedup_with_different_child_tries() { - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x01".into()), - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x02".into()), - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 2); - assert_eq!(result[0].len(), 1); - assert_eq!(result[1].len(), 1); - - let expected = vec![ - vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), - child_trie_key_string: Some("0x01".into()), - }], - vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), - child_trie_key_string: Some("0x02".into()), - }], - ]; - assert_eq!(result, expected); - } - - #[test] - fn dedup_with_same_child_tries() { - let items = vec![ - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x01".into()), - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x01".into()), - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 1); - - let expected = vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), - child_trie_key_string: Some("0x01".into()), - }]; - assert_eq!(result[0], expected); - } - - #[test] - fn dedup_with_shorter_key_reverse_order() { - let items 
= vec![ - ArchiveStorageDiffItem { - key: "0x01ff".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ]; - let result = deduplicate_storage_diff_items(items).unwrap(); - assert_eq!(result.len(), 1); - assert_eq!(result[0].len(), 1); - - let expected = vec![DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }]; - assert_eq!(result[0], expected); - } - - #[test] - fn dedup_multiple_child_tries() { - let items = vec![ - ArchiveStorageDiffItem { - key: "0x02".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x01".into()), - }, - ArchiveStorageDiffItem { - key: "0x02".into(), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: Some("0x01".into()), - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x02".into()), - }, - ArchiveStorageDiffItem { - key: "0x01".into(), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: Some("0x02".into()), - }, - ArchiveStorageDiffItem { - key: "0x01ff".into(), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x02".into()), - }, - ]; - - let result = deduplicate_storage_diff_items(items).unwrap(); - - let expected = vec![ - vec![DiffDetails { - key: StorageKey(vec![2]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - child_trie_key_string: None, - }], - vec![ - DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), - child_trie_key_string: Some("0x01".into()), - }, - DiffDetails { - key: StorageKey(vec![2]), 
- return_type: ArchiveStorageDiffType::Hash, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), - child_trie_key_string: Some("0x01".into()), - }, - ], - vec![ - DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), - child_trie_key_string: Some("0x02".into()), - }, - DiffDetails { - key: StorageKey(vec![1]), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), - child_trie_key_string: Some("0x02".into()), - }, - ], - ]; - - assert_eq!(result, expected); - } - - #[test] - fn test_lexicographic_diff() { - let left = vec![1, 2, 3, 4, 5]; - let right = vec![2, 3, 4, 5, 6]; - - let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); - let expected = vec![ - Diff::Added(1), - Diff::Equal(2), - Diff::Equal(3), - Diff::Equal(4), - Diff::Equal(5), - Diff::Deleted(6), - ]; - assert_eq!(diff, expected); - } - - #[test] - fn test_lexicographic_diff_one_side_empty() { - let left = vec![]; - let right = vec![1, 2, 3, 4, 5, 6]; - - let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); - let expected = vec![ - Diff::Deleted(1), - Diff::Deleted(2), - Diff::Deleted(3), - Diff::Deleted(4), - Diff::Deleted(5), - Diff::Deleted(6), - ]; - assert_eq!(diff, expected); - - let left = vec![1, 2, 3, 4, 5, 6]; - let right = vec![]; - let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); - let expected = vec![ - Diff::Added(1), - Diff::Added(2), - Diff::Added(3), - Diff::Added(4), - Diff::Added(5), - Diff::Added(6), - ]; - assert_eq!(diff, expected); + ArchiveStorageResult::ok(storage_results, discarded_items) } } diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 14fa104c113a..5f020c203eab 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ 
b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::Archive; +pub use archive::{Archive, ArchiveConfig}; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 48cbbaa4934a..078016f5b3e2 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -18,25 +18,24 @@ use crate::{ common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, - StorageQueryType, StorageResult, StorageResultType, + ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, + StorageResultType, }, hex_string, MethodResult, }; -use super::{archive::Archive, *}; +use super::{ + archive::{Archive, ArchiveConfig}, + *, +}; use assert_matches::assert_matches; use codec::{Decode, Encode}; use jsonrpsee::{ - core::{server::Subscription as RpcSubscription, EmptyServerParams as EmptyParams}, - rpc_params, MethodsError as Error, RpcModule, + core::EmptyServerParams as EmptyParams, rpc_params, MethodsError as Error, RpcModule, }; - use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; -use sc_rpc::testing::TokioTestExecutor; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{Blake2Hasher, Hasher}; @@ -52,6 +51,8 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; +const MAX_PAGINATION_LIMIT: usize = 5; +const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -60,7 +61,10 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = 
substrate_test_runtime_client::runtime::Block; -fn setup_api() -> (Arc>, RpcModule>>) { +fn setup_api( + max_descendant_responses: usize, + max_queried_items: usize, +) -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -74,25 +78,16 @@ fn setup_api() -> (Arc>, RpcModule(sub: &mut RpcSubscription) -> T { - let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) - .await - .unwrap() - .unwrap() - .unwrap(); - event -} - #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(); + let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -101,7 +96,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -117,8 +112,8 @@ async fn archive_body() { builder .push_transfer(runtime::Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -135,7 +130,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -151,8 +146,8 @@ async fn archive_header() { builder .push_transfer(runtime::Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -169,7 +164,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -181,7 +176,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -249,8 +244,8 @@ async fn archive_hash_by_height() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -287,7 +282,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -330,7 +325,7 @@ async fn archive_call() { client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); // Valid call. - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
let call_parameters = hex_string(&alice_id.encode()); let result: MethodResult = api @@ -346,7 +341,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -360,23 +355,42 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, + let items: Vec> = vec![ + PaginatedStorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }, + PaginatedStorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }, + PaginatedStorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }, + PaginatedStorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None, + }, ]; - let mut sub = api - .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let result: ArchiveStorageResult = api + .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - // Key has not been imported yet. - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::StorageDone, - ); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + // Key has not been imported yet. 
+ assert_eq!(result.len(), 0); + assert_eq!(discarded_items, 0); + }, + _ => panic!("Unexpected result"), + }; // Import a block with the given key value pair. let mut builder = BlockBuilderBuilder::new(&*client) @@ -392,103 +406,32 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let mut sub = api - .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) - .await - .unwrap(); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Hash(expected_hash.clone()), - child_trie_key: None, - }), - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Value(expected_value.clone()), - child_trie_key: None, - }), - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Hash(expected_hash), - child_trie_key: None, - }), - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Value(expected_value), - child_trie_key: None, - }), - ); - - assert_matches!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::StorageDone - ); -} - -#[tokio::test] -async fn archive_storage_hashes_values_child_trie() { - let (client, api) = setup_api(); - - // Get child storage values set in `setup_api`. 
- let child_info = hex_string(&CHILD_STORAGE_KEY); - let key = hex_string(&KEY); - let genesis_hash = format!("{:?}", client.genesis_hash()); - let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); - let expected_value = hex_string(&CHILD_VALUE); - - let items: Vec> = vec![ - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, - ]; - let mut sub = api - .subscribe_unbounded( - "archive_unstable_storage", - rpc_params![&genesis_hash, items, &child_info], - ) + let result: ArchiveStorageResult = api + .call("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Hash(expected_hash.clone()), - child_trie_key: Some(child_info.clone()), - }) - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: key.clone(), - result: StorageResultType::Value(expected_value.clone()), - child_trie_key: Some(child_info.clone()), - }) - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::StorageDone, - ); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 4); + assert_eq!(discarded_items, 0); + + assert_eq!(result[0].key, key); + assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); + assert_eq!(result[1].key, key); + assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); + assert_eq!(result[2].key, key); + assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); + assert_eq!(result[3].key, key); + assert_eq!(result[3].result, StorageResultType::Value(expected_value)); + }, + _ => panic!("Unexpected result"), + }; } #[tokio::test] async fn 
archive_storage_closest_merkle_value() { - let (client, api) = setup_api(); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); /// The core of this test. /// @@ -500,47 +443,55 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let mut sub = api - .subscribe_unbounded( + let result: ArchiveStorageResult = api + .call( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, // Key with descendant. - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, // Key that are not part of the trie. 
- StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, - StorageQuery { + PaginatedStorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, }, ] ], @@ -548,21 +499,19 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let mut merkle_values = HashMap::new(); - loop { - let event = get_next_event::(&mut sub).await; - match event { - ArchiveStorageEvent::Storage(result) => { - let str_result = match result.result { + let merkle_values: HashMap<_, _> = match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. }) => result + .into_iter() + .map(|res| { + let value = match res.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected result type"), + _ => panic!("Unexpected StorageResultType"), }; - merkle_values.insert(result.key, str_result); - }, - ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), - ArchiveStorageEvent::StorageDone => break, - } - } + (res.key, value) + }) + .collect(), + _ => panic!("Unexpected result"), + }; // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -641,9 +590,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_iterations() { +async fn archive_storage_paginate_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(); + let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -662,344 +611,230 @@ async fn archive_storage_iterations() { // Calling with an invalid hash. 
let invalid_hash = hex_string(&INVALID_HASH); - let mut sub = api - .subscribe_unbounded( + let result: ArchiveStorageResult = api + .call( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![StorageQuery { + vec![PaginatedStorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, }] ], ) .await .unwrap(); - - assert_matches!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::StorageError(_) - ); + match result { + ArchiveStorageResult::Err(_) => (), + _ => panic!("Unexpected result"), + }; // Valid call with storage at the key. - let mut sub = api - .subscribe_unbounded( + let result: ArchiveStorageResult = api + .call( "archive_unstable_storage", rpc_params![ &block_hash, - vec![StorageQuery { + vec![PaginatedStorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, }] ], ) .await .unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 0); - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: hex_string(b":m"), - result: StorageResultType::Value(hex_string(b"a")), - child_trie_key: None, - }) - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: hex_string(b":mo"), - result: StorageResultType::Value(hex_string(b"ab")), - child_trie_key: None, - }) - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: hex_string(b":moD"), - result: StorageResultType::Value(hex_string(b"abcmoD")), - child_trie_key: None, - }) - ); - - assert_eq!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: hex_string(b":moc"), - result: StorageResultType::Value(hex_string(b"abc")), - child_trie_key: None, - }) - ); - - assert_eq!( - 
get_next_event::(&mut sub).await, - ArchiveStorageEvent::Storage(StorageResult { - key: hex_string(b":mock"), - result: StorageResultType::Value(hex_string(b"abcd")), - child_trie_key: None, - }) - ); - - assert_matches!( - get_next_event::(&mut sub).await, - ArchiveStorageEvent::StorageDone - ); -} - -#[tokio::test] -async fn archive_storage_diff_main_trie() { - let (client, api) = setup_api(); - - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); - builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); - let prev_block = builder.build().unwrap().block; - let prev_hash = format!("{:?}", prev_block.header.hash()); - client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + assert_eq!(result[0].key, hex_string(b":m")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); + }, + _ => panic!("Unexpected result"), + }; - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(prev_block.hash()) - .with_parent_block_number(1) - .build() + // Continue with pagination. 
+ let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":m")), + }] + ], + ) + .await .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"11".to_vec())).unwrap(); - builder.push_storage_change(b":AA".to_vec(), Some(b"22".to_vec())).unwrap(); - builder.push_storage_change(b":AAA".to_vec(), Some(b"222".to_vec())).unwrap(); - let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); - client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 0); - // Search for items in the main trie: - // - values of keys under ":A" - // - hashes of keys under ":AA" - let items = vec![ - ArchiveStorageDiffItem:: { - key: hex_string(b":A"), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, + assert_eq!(result[0].key, hex_string(b":mo")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); }, - ArchiveStorageDiffItem:: { - key: hex_string(b":AA"), - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: None, - }, - ]; - let mut sub = api - .subscribe_unbounded( - "archive_unstable_storageDiff", - rpc_params![&block_hash, items.clone(), &prev_hash], + _ => panic!("Unexpected result"), + }; + + // Continue with pagination. 
+ let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":mo")), + }] + ], ) .await .unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 0); - let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":A"), - result: StorageResultType::Value(hex_string(b"11")), - operation_type: ArchiveStorageDiffOperationType::Modified, - child_trie_key: None, - }), - event, - ); - - let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":AA"), - result: StorageResultType::Value(hex_string(b"22")), - operation_type: ArchiveStorageDiffOperationType::Modified, - child_trie_key: None, - }), - event, - ); - - let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":AA"), - result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"22"))), - operation_type: ArchiveStorageDiffOperationType::Modified, - child_trie_key: None, - }), - event, - ); - - // Added key. 
- let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":AAA"), - result: StorageResultType::Value(hex_string(b"222")), - operation_type: ArchiveStorageDiffOperationType::Added, - child_trie_key: None, - }), - event, - ); - - let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":AAA"), - result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"222"))), - operation_type: ArchiveStorageDiffOperationType::Added, - child_trie_key: None, - }), - event, - ); + assert_eq!(result[0].key, hex_string(b":moD")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); + }, + _ => panic!("Unexpected result"), + }; - let event = get_next_event::(&mut sub).await; - assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); -} + // Continue with pagination. + let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":moD")), + }] + ], + ) + .await + .unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 0); -#[tokio::test] -async fn archive_storage_diff_no_changes() { - let (client, api) = setup_api(); + assert_eq!(result[0].key, hex_string(b":moc")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); + }, + _ => panic!("Unexpected result"), + }; - // Build 2 identical blocks. - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() + // Continue with pagination. 
+ let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":moc")), + }] + ], + ) + .await .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); - builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); - builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); - builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); - let prev_block = builder.build().unwrap().block; - let prev_hash = format!("{:?}", prev_block.header.hash()); - client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 0); - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(prev_block.hash()) - .with_parent_block_number(1) - .build() - .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); - builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); - let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); - client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + assert_eq!(result[0].key, hex_string(b":mock")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); + }, + _ => panic!("Unexpected result"), + }; - // Search for items in the main trie with keys prefixed with ":A". 
- let items = vec![ArchiveStorageDiffItem:: { - key: hex_string(b":A"), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }]; - let mut sub = api - .subscribe_unbounded( - "archive_unstable_storageDiff", - rpc_params![&block_hash, items.clone(), &prev_hash], + // Continue with pagination until no keys are returned. + let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":mock")), + }] + ], ) .await .unwrap(); - - let event = get_next_event::(&mut sub).await; - assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 0); + assert_eq!(discarded_items, 0); + }, + _ => panic!("Unexpected result"), + }; } #[tokio::test] -async fn archive_storage_diff_deleted_changes() { - let (client, api) = setup_api(); - - // Blocks are imported as forks. - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); - builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); - builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); - builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); - let prev_block = builder.build().unwrap().block; - let prev_hash = format!("{:?}", prev_block.header.hash()); - client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); +async fn archive_storage_discarded_items() { + // One query at a time + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); + // Import a new block with storage changes. 
let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) .build() .unwrap(); - builder - .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), - amount: 41, - nonce: 0, - }) - .unwrap(); - builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Search for items in the main trie with keys prefixed with ":A". - let items = vec![ArchiveStorageDiffItem:: { - key: hex_string(b":A"), - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }]; - - let mut sub = api - .subscribe_unbounded( - "archive_unstable_storageDiff", - rpc_params![&block_hash, items.clone(), &prev_hash], - ) - .await - .unwrap(); - - let event = get_next_event::(&mut sub).await; - assert_eq!( - ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { - key: hex_string(b":AA"), - result: StorageResultType::Value(hex_string(b"BB")), - operation_type: ArchiveStorageDiffOperationType::Deleted, - child_trie_key: None, - }), - event, - ); - - let event = get_next_event::(&mut sub).await; - assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); -} - -#[tokio::test] -async fn archive_storage_diff_invalid_params() { - let invalid_hash = hex_string(&INVALID_HASH); - let (_, api) = setup_api(); - - // Invalid shape for parameters. 
- let items: Vec> = Vec::new(); - let err = api - .subscribe_unbounded( - "archive_unstable_storageDiff", - rpc_params!["123", items.clone(), &invalid_hash], - ) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(ref err) if err.code() == crate::chain_head::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" - ); - - // The shape is right, but the block hash is invalid. - let items: Vec> = Vec::new(); - let mut sub = api - .subscribe_unbounded( - "archive_unstable_storageDiff", - rpc_params![&invalid_hash, items.clone(), &invalid_hash], + // Valid call with storage at the key. + let result: ArchiveStorageResult = api + .call( + "archive_unstable_storage", + rpc_params![ + &block_hash, + vec![ + PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::Value, + pagination_start_key: None, + }, + PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }, + PaginatedStorageQuery { + key: hex_string(b":m"), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + } + ] + ], ) .await .unwrap(); + match result { + ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { + assert_eq!(result.len(), 1); + assert_eq!(discarded_items, 2); - let event = get_next_event::(&mut sub).await; - assert_matches!(event, - ArchiveStorageDiffEvent::StorageDiffError(ref err) if err.error.contains("Header was not found") - ); + assert_eq!(result[0].key, hex_string(b":m")); + assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); + }, + _ => panic!("Unexpected result"), + }; } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index b949fb25402b..61eb47d1b9ab 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -318,7 +318,7 @@ where }), }; - 
let (rp, rp_fut) = method_started_response(operation_id, None); + let (rp, rp_fut) = method_started_response(operation_id); let fut = async move { // Wait for the server to send out the response and if it produces an error no event // should be generated. @@ -432,8 +432,7 @@ where let mut storage_client = ChainHeadStorage::::new(self.client.clone()); - // Storage items are never discarded. - let (rp, rp_fut) = method_started_response(block_guard.operation().operation_id(), Some(0)); + let (rp, rp_fut) = method_started_response(block_guard.operation().operation_id()); let fut = async move { // Wait for the server to send out the response and if it produces an error no event @@ -508,7 +507,7 @@ where let operation_id = block_guard.operation().operation_id(); let client = self.client.clone(); - let (rp, rp_fut) = method_started_response(operation_id.clone(), None); + let (rp, rp_fut) = method_started_response(operation_id.clone()); let fut = async move { // Wait for the server to send out the response and if it produces an error no event // should be generated. @@ -631,9 +630,8 @@ where fn method_started_response( operation_id: String, - discarded_items: Option, ) -> (ResponsePayload<'static, MethodResponse>, MethodResponseFuture) { - let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items }); + let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items: None }); ResponsePayload::success(rp).notify_on_completion() } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index de74145a3f08..bd9863060910 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_storage` method. +/// The response of the `chainHead_call` method. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,7 +536,6 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), - child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 3e1bd23776d3..95a7c7fe1832 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_with_backend; + use sc_service::client::new_in_mem; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_with_backend::<_, _, Block, _, RuntimeApi>( + new_in_mem::<_, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, - Box::new(TaskExecutor::new()), None, None, + Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 3ec5e805ecd5..c505566d887d 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_with_backend; +use sc_service::client::new_in_mem; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -506,8 +506,8 @@ async fn get_body() { .unwrap(); builder .push_transfer(runtime::Transfer { - from: Sr25519Keyring::Alice.into(), - to: 
Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -580,7 +580,7 @@ async fn call_runtime() { ); // Valid call. - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -670,7 +670,7 @@ async fn call_runtime_without_flag() { ); // Valid runtime call on a subscription started with `with_runtime` false. - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let err = api .call::<_, serde_json::Value>( @@ -1256,7 +1256,7 @@ async fn unique_operation_ids() { assert!(op_ids.insert(operation_id)); // Valid `chainHead_v1_call` call. - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( @@ -1423,8 +1423,8 @@ async fn follow_generates_initial_blocks() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2046,8 +2046,8 @@ async fn follow_prune_best_block() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2217,8 +2217,8 @@ async fn follow_forks_pruned_block() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), 
amount: 41, nonce: 0, }) @@ -2233,8 +2233,8 @@ async fn follow_forks_pruned_block() { .unwrap(); block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Bob.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Bob.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2379,8 +2379,8 @@ async fn follow_report_multiple_pruned_block() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2397,8 +2397,8 @@ async fn follow_report_multiple_pruned_block() { block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Bob.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Bob.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_with_backend::<_, _, Block, _, RuntimeApi>( + new_in_mem::<_, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, - Box::new(TokioTestExecutor::default()), None, None, + Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), @@ -2871,7 +2871,7 @@ async fn ensure_operation_limits_works() { let operation_id = match response { MethodResponse::Started(started) => { // Check discarded items. - assert_eq!(started.discarded_items, Some(0)); + assert!(started.discarded_items.is_none()); started.operation_id }, MethodResponse::LimitReached => panic!("Expected started response"), @@ -2883,7 +2883,7 @@ async fn ensure_operation_limits_works() { ); // The storage is finished and capacity must be released. - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -3228,10 +3228,7 @@ async fn storage_closest_merkle_value() { .await .unwrap(); let operation_id = match response { - MethodResponse::Started(started) => { - assert_eq!(started.discarded_items, Some(0)); - started.operation_id - }, + MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), }; @@ -3537,7 +3534,7 @@ async fn chain_head_single_connection_context() { .unwrap(); assert_matches!(response, MethodResponse::LimitReached); - let alice_id = Sr25519Keyring::Alice.to_account_id(); + let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( @@ -3663,8 +3660,8 @@ async fn follow_unique_pruned_blocks() { let block_6_hash = import_block(client.clone(), block_2_f_hash, 2).await.hash(); // Import block 2 as best on the fork. let mut tx_alice_ferdie = Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }; @@ -3846,8 +3843,8 @@ async fn follow_report_best_block_of_a_known_block() { // imported block_builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index 44f722c0c61b..b1627d74c844 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,14 +78,10 @@ pub struct StorageResult { /// The result of the query. 
#[serde(flatten)] pub result: StorageResultType, - /// The child trie key if provided. - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub child_trie_key: Option, } /// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum StorageResultType { /// Fetch the value of the provided key. @@ -109,41 +105,23 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -pub enum ArchiveStorageEvent { +#[serde(untagged)] +pub enum ArchiveStorageResult { /// Query generated a result. - Storage(StorageResult), + Ok(ArchiveStorageMethodOk), /// Query encountered an error. - StorageError(ArchiveStorageMethodErr), - /// Operation storage is done. - StorageDone, + Err(ArchiveStorageMethodErr), } -impl ArchiveStorageEvent { - /// Create a new `ArchiveStorageEvent::StorageErr` event. - pub fn err(error: String) -> Self { - Self::StorageError(ArchiveStorageMethodErr { error }) - } - - /// Create a new `ArchiveStorageEvent::StorageResult` event. - pub fn result(result: StorageResult) -> Self { - Self::Storage(result) - } - - /// Checks if the event is a `StorageDone` event. - pub fn is_done(&self) -> bool { - matches!(self, Self::StorageDone) +impl ArchiveStorageResult { + /// Create a new `ArchiveStorageResult::Ok` result. + pub fn ok(result: Vec, discarded_items: usize) -> Self { + Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) } - /// Checks if the event is a `StorageErr` event. - pub fn is_err(&self) -> bool { - matches!(self, Self::StorageError(_)) - } - - /// Checks if the event is a `StorageResult` event. - pub fn is_result(&self) -> bool { - matches!(self, Self::Storage(_)) + /// Create a new `ArchiveStorageResult::Err` result. 
+ pub fn err(error: String) -> Self { + Self::Err(ArchiveStorageMethodErr { error }) } } @@ -158,229 +136,22 @@ pub struct ArchiveStorageMethodOk { } /// The error of a storage call. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageMethodErr { /// Reported error. pub error: String, } -/// The type of the archive storage difference query. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum ArchiveStorageDiffType { - /// The result is provided as value of the key. - Value, - /// The result the hash of the value of the key. - Hash, -} - -/// The storage item to query. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ArchiveStorageDiffItem { - /// The provided key. - pub key: Key, - /// The type of the storage query. - pub return_type: ArchiveStorageDiffType, - /// The child trie key if provided. - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub child_trie_key: Option, -} - -/// The result of a storage difference call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ArchiveStorageDiffMethodResult { - /// Reported results. - pub result: Vec, -} - -/// The result of a storage difference call operation type. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum ArchiveStorageDiffOperationType { - /// The key is added. - Added, - /// The key is modified. - Modified, - /// The key is removed. - Deleted, -} - -/// The result of an individual storage difference key. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ArchiveStorageDiffResult { - /// The hex-encoded key of the result. 
- pub key: String, - /// The result of the query. - #[serde(flatten)] - pub result: StorageResultType, - /// The operation type. - #[serde(rename = "type")] - pub operation_type: ArchiveStorageDiffOperationType, - /// The child trie key if provided. - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub child_trie_key: Option, -} - -/// The event generated by the `archive_storageDiff` method. -/// -/// The `archive_storageDiff` can generate the following events: -/// - `storageDiff` event - generated when a `ArchiveStorageDiffResult` is produced. -/// - `storageDiffError` event - generated when an error is produced. -/// - `storageDiffDone` event - generated when the `archive_storageDiff` method completed. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -pub enum ArchiveStorageDiffEvent { - /// The `storageDiff` event. - StorageDiff(ArchiveStorageDiffResult), - /// The `storageDiffError` event. - StorageDiffError(ArchiveStorageMethodErr), - /// The `storageDiffDone` event. - StorageDiffDone, -} - -impl ArchiveStorageDiffEvent { - /// Create a new `ArchiveStorageDiffEvent::StorageDiffError` event. - pub fn err(error: String) -> Self { - Self::StorageDiffError(ArchiveStorageMethodErr { error }) - } - - /// Checks if the event is a `StorageDiffDone` event. - pub fn is_done(&self) -> bool { - matches!(self, Self::StorageDiffDone) - } - - /// Checks if the event is a `StorageDiffError` event. - pub fn is_err(&self) -> bool { - matches!(self, Self::StorageDiffError(_)) - } -} - #[cfg(test)] mod tests { use super::*; - #[test] - fn archive_diff_input() { - // Item with Value. 
- let item = ArchiveStorageDiffItem { - key: "0x1", - return_type: ArchiveStorageDiffType::Value, - child_trie_key: None, - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","returnType":"value"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - - // Item with Hash. - let item = ArchiveStorageDiffItem { - key: "0x1", - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: None, - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","returnType":"hash"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - - // Item with Value and child trie key. - let item = ArchiveStorageDiffItem { - key: "0x1", - return_type: ArchiveStorageDiffType::Value, - child_trie_key: Some("0x2"), - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","returnType":"value","childTrieKey":"0x2"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - - // Item with Hash and child trie key. - let item = ArchiveStorageDiffItem { - key: "0x1", - return_type: ArchiveStorageDiffType::Hash, - child_trie_key: Some("0x2"), - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","returnType":"hash","childTrieKey":"0x2"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - } - - #[test] - fn archive_diff_output() { - // Item with Value. 
- let item = ArchiveStorageDiffResult { - key: "0x1".into(), - result: StorageResultType::Value("res".into()), - operation_type: ArchiveStorageDiffOperationType::Added, - child_trie_key: None, - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","value":"res","type":"added"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - - // Item with Hash. - let item = ArchiveStorageDiffResult { - key: "0x1".into(), - result: StorageResultType::Hash("res".into()), - operation_type: ArchiveStorageDiffOperationType::Modified, - child_trie_key: None, - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","hash":"res","type":"modified"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - - // Item with Hash, child trie key and removed. - let item = ArchiveStorageDiffResult { - key: "0x1".into(), - result: StorageResultType::Hash("res".into()), - operation_type: ArchiveStorageDiffOperationType::Deleted, - child_trie_key: Some("0x2".into()), - }; - // Encode - let ser = serde_json::to_string(&item).unwrap(); - let exp = r#"{"key":"0x1","hash":"res","type":"deleted","childTrieKey":"0x2"}"#; - assert_eq!(ser, exp); - // Decode - let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); - assert_eq!(dec, item); - } - #[test] fn storage_result() { // Item with Value. - let item = StorageResult { - key: "0x1".into(), - result: StorageResultType::Value("res".into()), - child_trie_key: None, - }; + let item = + StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -390,11 +161,8 @@ mod tests { assert_eq!(dec, item); // Item with Hash. 
- let item = StorageResult { - key: "0x1".into(), - result: StorageResultType::Hash("res".into()), - child_trie_key: None, - }; + let item = + StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -407,7 +175,6 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), - child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index a1e34d51530e..2e24a8da8ca8 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; +use super::events::{StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,6 +70,9 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result, String>; +/// The result of iterating over keys. 
+pub type QueryIterResult = Result<(Vec, Option), String>; + impl Storage where Block: BlockT + 'static, @@ -94,7 +97,6 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), - child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -118,7 +120,6 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), - child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -148,7 +149,6 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), - child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,111 +199,53 @@ where } } - /// Raw iterator over the keys. - pub fn raw_keys_iter( + /// Iterate over at most the provided number of keys. + /// + /// Returns the storage result with a potential next key to resume iteration. 
+ pub fn query_iter_pagination( &self, + query: QueryIter, hash: Block::Hash, - child_key: Option, - ) -> Result, String> { - let keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys(hash, child_key, None, None) - } else { - self.client.storage_keys(hash, None, None) - }; + child_key: Option<&ChildInfo>, + count: usize, + ) -> QueryIterResult { + let QueryIter { ty, query_key, pagination_start_key } = query; - keys_iter.map_err(|err| err.to_string()) - } -} + let mut keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys( + hash, + child_key.to_owned(), + Some(&query_key), + pagination_start_key.as_ref(), + ) + } else { + self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) + } + .map_err(|err| err.to_string())?; -/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. -pub struct StorageSubscriptionClient { - /// Storage client. - client: Storage, - _phandom: PhantomData<(BE, Block)>, -} + let mut ret = Vec::with_capacity(count); + let mut next_pagination_key = None; + for _ in 0..count { + let Some(key) = keys_iter.next() else { break }; -impl Clone for StorageSubscriptionClient { - fn clone(&self) -> Self { - Self { client: self.client.clone(), _phandom: PhantomData } - } -} + next_pagination_key = Some(key.clone()); -impl StorageSubscriptionClient { - /// Constructs a new [`StorageSubscriptionClient`]. - pub fn new(client: Arc) -> Self { - Self { client: Storage::new(client), _phandom: PhantomData } - } -} + let result = match ty { + IterQueryType::Value => self.query_value(hash, &key, child_key), + IterQueryType::Hash => self.query_hash(hash, &key, child_key), + }?; -impl StorageSubscriptionClient -where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: StorageProvider + Send + Sync + 'static, -{ - /// Generate storage events to the provided sender. 
- pub async fn generate_events( - &mut self, - hash: Block::Hash, - items: Vec>, - child_key: Option, - tx: mpsc::Sender, - ) -> Result<(), tokio::task::JoinError> { - let this = self.clone(); - - tokio::task::spawn_blocking(move || { - for item in items { - match item.query_type { - StorageQueryType::Value => { - let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); - if tx.blocking_send(rp).is_err() { - break; - } - }, - StorageQueryType::Hash => { - let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); - if tx.blocking_send(rp).is_err() { - break; - } - }, - StorageQueryType::ClosestDescendantMerkleValue => { - let rp = - this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); - if tx.blocking_send(rp).is_err() { - break; - } - }, - StorageQueryType::DescendantsValues => { - let query = QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: None, - }; - this.client.query_iter_pagination_with_producer( - query, - hash, - child_key.as_ref(), - &tx, - ) - }, - StorageQueryType::DescendantsHashes => { - let query = QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: None, - }; - this.client.query_iter_pagination_with_producer( - query, - hash, - child_key.as_ref(), - &tx, - ) - }, - } + if let Some(value) = result { + ret.push(value); } - }) - .await?; + } - Ok(()) + // Save the next key if any to continue the iteration. 
+ let maybe_next_query = keys_iter.next().map(|_| QueryIter { + ty, + query_key, + pagination_start_key: next_pagination_key, + }); + Ok((ret, maybe_next_query)) } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs index a543969a89b8..adcc987f9c39 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -16,16 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use async_trait::async_trait; use codec::Encode; +use futures::Future; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, - TransactionSource, TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use crate::hex_string; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use sp_runtime::traits::Block as BlockT; use std::{collections::HashMap, pin::Pin, sync::Arc}; @@ -77,64 +77,67 @@ impl MiddlewarePool { } } -#[async_trait] impl TransactionPool for MiddlewarePool { type Block = as TransactionPool>::Block; type Hash = as TransactionPool>::Hash; type InPoolTransaction = as TransactionPool>::InPoolTransaction; type Error = as TransactionPool>::Error; - async fn submit_at( + fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> Result, Self::Error>>, Self::Error> { - self.inner_pool.submit_at(at, source, xts).await + ) -> PoolFuture, Self::Error>>, Self::Error> { + self.inner_pool.submit_at(at, source, xts) } - async fn submit_one( + fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result, Self::Error> { - 
self.inner_pool.submit_one(at, source, xt).await + ) -> PoolFuture, Self::Error> { + self.inner_pool.submit_one(at, source, xt) } - async fn submit_and_watch( + fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result>>, Self::Error> { - let transaction = hex_string(&xt.encode()); + ) -> PoolFuture>>, Self::Error> { + let pool = self.inner_pool.clone(); let sender = self.sender.clone(); + let transaction = hex_string(&xt.encode()); - let watcher = match self.inner_pool.submit_and_watch(at, source, xt).await { - Ok(watcher) => watcher, - Err(err) => { - let _ = sender.send(MiddlewarePoolEvent::PoolError { - transaction: transaction.clone(), - err: err.to_string(), + async move { + let watcher = match pool.submit_and_watch(at, source, xt).await { + Ok(watcher) => watcher, + Err(err) => { + let _ = sender.send(MiddlewarePoolEvent::PoolError { + transaction: transaction.clone(), + err: err.to_string(), + }); + return Err(err); + }, + }; + + let watcher = watcher.map(move |status| { + let sender = sender.clone(); + let transaction = transaction.clone(); + + let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { + transaction, + status: status.clone(), }); - return Err(err); - }, - }; - - let watcher = watcher.map(move |status| { - let sender = sender.clone(); - let transaction = transaction.clone(); - let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { - transaction, - status: status.clone(), + status }); - status - }); - - Ok(watcher.boxed()) + Ok(watcher.boxed()) + } + .boxed() } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -161,11 +164,17 @@ impl TransactionPool for MiddlewarePool { self.inner_pool.ready_transaction(hash) } - async fn ready_at( + fn ready_at( &self, at: ::Hash, - ) -> Box> + Send> { - self.inner_pool.ready_at(at).await + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + > { + self.inner_pool.ready_at(at) } fn ready(&self) -> Box> + Send> { @@ -176,11 
+185,18 @@ impl TransactionPool for MiddlewarePool { self.inner_pool.futures() } - async fn ready_at_with_timeout( + fn ready_at_with_timeout( &self, at: ::Hash, _timeout: std::time::Duration, - ) -> Box> + Send> { - self.inner_pool.ready_at(at).await + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send + + '_, + >, + > { + self.inner_pool.ready_at(at) } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index c2f11878e8fc..efb3bd94ddbf 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -23,7 +23,7 @@ use jsonrpsee::{rpc_params, MethodsError as Error}; use sc_transaction_pool::{Options, PoolLimit}; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; use std::sync::Arc; -use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; const MAX_TX_PER_CONNECTION: usize = 4; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs index 879d51eaf5f3..53c5b8ce3895 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs @@ -26,7 +26,7 @@ use jsonrpsee::rpc_params; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_core::H256; use std::{sync::Arc, vec}; -use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; // Test helpers. 
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index 8be932f02ed4..6fe28a3873e9 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -21,6 +21,7 @@ futures = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } @@ -29,7 +30,6 @@ sc-rpc-api = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -38,22 +38,22 @@ sp-offchain = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-session = { workspace = true, default-features = true } -sp-statement-store = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } tokio = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } -pretty_assertions = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, 
default-features = true } sp-consensus = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } [features] test-helpers = [] diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index 6afc871e565a..731f4df2f6f3 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -29,6 +29,7 @@ use crate::{ }; use codec::{Decode, Encode}; +use futures::TryFutureExt; use jsonrpsee::{core::async_trait, types::ErrorObject, Extensions, PendingSubscriptionSink}; use sc_rpc_api::check_if_safe; use sc_transaction_pool_api::{ @@ -190,16 +191,14 @@ where }, }; - let pool = self.pool.clone(); + let submit = self.pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) + }); + let fut = async move { - let submit = - pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).await.map_err(|e| { - e.into_pool_error() - .map(error::Error::from) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) - }); - - let stream = match submit { + let stream = match submit.await { Ok(stream) => stream, Err(err) => { let _ = pending.reject(ErrorObject::from(err)).await; diff --git a/substrate/client/rpc/src/author/tests.rs b/substrate/client/rpc/src/author/tests.rs index b1c899667624..ab0b8bdab699 100644 --- a/substrate/client/rpc/src/author/tests.rs +++ b/substrate/client/rpc/src/author/tests.rs @@ -39,15 +39,15 @@ use std::sync::Arc; use substrate_test_runtime_client::{ self, runtime::{Block, Extrinsic, ExtrinsicBuilder, SessionKeys, Transfer}, - Backend, Client, 
DefaultTestClientBuilderExt, Sr25519Keyring, TestClientBuilderExt, + AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, }; -fn uxt(sender: Sr25519Keyring, nonce: u64) -> Extrinsic { +fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { let tx = Transfer { amount: Default::default(), nonce, from: sender.into(), - to: Sr25519Keyring::Bob.into(), + to: AccountKeyring::Bob.into(), }; ExtrinsicBuilder::new_transfer(tx).build() } @@ -99,7 +99,7 @@ impl TestSetup { async fn author_submit_transaction_should_not_cause_error() { let api = TestSetup::into_rpc(); - let xt: Bytes = uxt(Sr25519Keyring::Alice, 1).encode().into(); + let xt: Bytes = uxt(AccountKeyring::Alice, 1).encode().into(); let extrinsic_hash: H256 = blake2_256(&xt).into(); let response: H256 = api.call("author_submitExtrinsic", [xt.clone()]).await.unwrap(); @@ -116,7 +116,7 @@ async fn author_should_watch_extrinsic() { let api = TestSetup::into_rpc(); let xt = to_hex( &ExtrinsicBuilder::new_call_with_priority(0) - .signer(Sr25519Keyring::Alice.into()) + .signer(AccountKeyring::Alice.into()) .build() .encode(), true, @@ -135,7 +135,7 @@ async fn author_should_watch_extrinsic() { // Replace the extrinsic and observe the subscription is notified. 
let (xt_replacement, xt_hash) = { let tx = ExtrinsicBuilder::new_call_with_priority(1) - .signer(Sr25519Keyring::Alice.into()) + .signer(AccountKeyring::Alice.into()) .build() .encode(); let hash = blake2_256(&tx); @@ -172,7 +172,7 @@ async fn author_should_return_watch_validation_error() { async fn author_should_return_pending_extrinsics() { let api = TestSetup::into_rpc(); - let xt_bytes: Bytes = uxt(Sr25519Keyring::Alice, 0).encode().into(); + let xt_bytes: Bytes = uxt(AccountKeyring::Alice, 0).encode().into(); api.call::<_, H256>("author_submitExtrinsic", [to_hex(&xt_bytes, true)]) .await .unwrap(); @@ -190,14 +190,14 @@ async fn author_should_remove_extrinsics() { // Submit three extrinsics, then remove two of them (will cause the third to be removed as well, // having a higher nonce) - let xt1_bytes = uxt(Sr25519Keyring::Alice, 0).encode(); + let xt1_bytes = uxt(AccountKeyring::Alice, 0).encode(); let xt1 = to_hex(&xt1_bytes, true); let xt1_hash: H256 = api.call("author_submitExtrinsic", [xt1]).await.unwrap(); - let xt2 = to_hex(&uxt(Sr25519Keyring::Alice, 1).encode(), true); + let xt2 = to_hex(&uxt(AccountKeyring::Alice, 1).encode(), true); let xt2_hash: H256 = api.call("author_submitExtrinsic", [xt2]).await.unwrap(); - let xt3 = to_hex(&uxt(Sr25519Keyring::Bob, 0).encode(), true); + let xt3 = to_hex(&uxt(AccountKeyring::Bob, 0).encode(), true); let xt3_hash: H256 = api.call("author_submitExtrinsic", [xt3]).await.unwrap(); assert_eq!(setup.pool.status().ready, 3); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index c02f0d0b759b..6b711f2425e9 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -228,8 +228,8 @@ async fn should_notify_about_storage_changes() { .unwrap(); builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42, 
nonce: 0, }) @@ -255,11 +255,11 @@ async fn should_send_initial_storage_changes_and_notifications() { let alice_balance_key = [ sp_crypto_hashing::twox_128(b"System"), sp_crypto_hashing::twox_128(b"Account"), - sp_crypto_hashing::blake2_128(&Sr25519Keyring::Alice.public()), + sp_crypto_hashing::blake2_128(&AccountKeyring::Alice.public()), ] .concat() .iter() - .chain(Sr25519Keyring::Alice.public().0.iter()) + .chain(AccountKeyring::Alice.public().0.iter()) .cloned() .collect::>(); @@ -281,8 +281,8 @@ async fn should_send_initial_storage_changes_and_notifications() { .unwrap(); builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42, nonce: 0, }) diff --git a/substrate/client/runtime-utilities/Cargo.toml b/substrate/client/runtime-utilities/Cargo.toml deleted file mode 100644 index 716b577d384a..000000000000 --- a/substrate/client/runtime-utilities/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -description = "Substrate client utilities for frame runtime functions calls." 
-name = "sc-runtime-utilities" -version = "0.1.0" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors.workspace = true -edition.workspace = true -homepage.workspace = true -repository.workspace = true -documentation = "https://docs.rs/sc-metadata" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { workspace = true, default-features = true } - -sc-executor = { workspace = true, default-features = true } -sc-executor-common = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -sp-wasm-interface = { workspace = true, default-features = true } - - -thiserror = { workspace = true } - -[dev-dependencies] -cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } -cumulus-test-runtime = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } -subxt = { workspace = true, features = ["native"] } diff --git a/substrate/client/runtime-utilities/src/error.rs b/substrate/client/runtime-utilities/src/error.rs deleted file mode 100644 index a0f1e45a5e57..000000000000 --- a/substrate/client/runtime-utilities/src/error.rs +++ /dev/null @@ -1,35 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -//! Errors types of runtime utilities. - -/// Generic result for the runtime utilities. -pub type Result = std::result::Result; - -/// Error type for the runtime utilities. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum Error { - #[error("Scale codec error: {0}")] - ScaleCodec(#[from] codec::Error), - #[error("Opaque metadata not found")] - OpaqueMetadataNotFound, - #[error("Stable metadata version not found")] - StableMetadataVersionNotFound, - #[error("WASM executor error: {0}")] - Executor(#[from] sc_executor_common::error::Error), -} diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index e46b252f30bf..f2fc65ef2439 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,70 +20,72 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. 
rocksdb = ["sc-client-db/rocksdb"] +# exposes the client type +test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] [dependencies] -async-trait = { workspace = true } -codec = { workspace = true, default-features = true } -directories = { workspace = true } -exit-future = { workspace = true } -futures = { workspace = true } -futures-timer = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } -log = { workspace = true, default-features = true } +thiserror = { workspace = true } +futures = { workspace = true } +rand = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } +futures-timer = { workspace = true } +exit-future = { workspace = true } pin-project = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-client-db = { workspace = true } -sc-consensus = { workspace = true, default-features = true } -sc-executor = { workspace = true, default-features = true } -sc-informant = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-common = { workspace = true, default-features = true } -sc-network-light = { workspace = true, default-features = true } -sc-network-sync = { workspace = true, default-features = true } -sc-network-transactions = { workspace = true, default-features = true } -sc-network-types = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-rpc-server = { workspace = true, default-features = true } -sc-rpc-spec-v2 = { workspace = true, default-features = true } -sc-sysinfo = 
{ workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -sc-utils = { workspace = true, default-features = true } -schnellru = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-externalities = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-session = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = 
true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +codec = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } sp-transaction-storage-proof = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } -static_init = { workspace = true } -tempfile = { workspace = true } -thiserror = { workspace = true } -tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } +sc-rpc-server = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-informant = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } tracing-futures = { workspace = true } +async-trait = { workspace = true } +tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } +tempfile = { workspace = true } +directories = { workspace = true } +static_init = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true 
} diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index a47a05c0a190..ce4ce7c08248 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -25,7 +25,7 @@ use crate::{ start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; -use futures::{select, FutureExt, StreamExt}; +use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; use jsonrpsee::RpcModule; use log::info; use prometheus_endpoint::Registry; @@ -90,11 +90,7 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; -use std::{ - str::FromStr, - sync::Arc, - time::{Duration, SystemTime}, -}; +use std::{str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. pub type TFullClient = @@ -581,42 +577,22 @@ pub async fn propagate_transaction_notifications( Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, { - const TELEMETRY_INTERVAL: Duration = Duration::from_secs(1); - // transaction notifications - let mut notifications = transaction_pool.import_notification_stream().fuse(); - let mut timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); - let mut tx_imported = false; - - loop { - select! 
{ - notification = notifications.next() => { - let Some(hash) = notification else { return }; - - tx_handler_controller.propagate_transaction(hash); - - tx_imported = true; - }, - _ = timer => { - timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); - - if !tx_imported { - continue; - } - - tx_imported = false; - let status = transaction_pool.status(); - - telemetry!( - telemetry; - SUBSTRATE_INFO; - "txpool.import"; - "ready" => status.ready, - "future" => status.future, - ); - } - } - } + transaction_pool + .import_notification_stream() + .for_each(move |hash| { + tx_handler_controller.propagate_transaction(hash); + let status = transaction_pool.status(); + telemetry!( + telemetry; + SUBSTRATE_INFO; + "txpool.import"; + "ready" => status.ready, + "future" => status.future, + ); + ready(()) + }) + .await; } /// Initialize telemetry with provided configuration and return telemetry handle @@ -755,7 +731,8 @@ where client.clone(), backend.clone(), genesis_hash, - task_executor.clone(), + // Defaults to sensible limits for the `Archive`. + sc_rpc_spec_v2::archive::ArchiveConfig::default(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; @@ -845,6 +822,7 @@ pub fn build_network( Arc, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, + NetworkStarter, Arc>, ), Error, @@ -1006,6 +984,7 @@ pub fn build_network_advanced( Arc, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, + NetworkStarter, Arc>, ), Error, @@ -1146,6 +1125,22 @@ where announce_block, ); + // TODO: Normally, one is supposed to pass a list of notifications protocols supported by the + // node through the `NetworkConfiguration` struct. But because this function doesn't know in + // advance which components, such as GrandPa or Polkadot, will be plugged on top of the + // service, it is unfortunately not possible to do so without some deep refactoring. 
To + // bypass this problem, the `NetworkService` provides a `register_notifications_protocol` + // method that can be called even after the network has been initialized. However, we want to + // avoid the situation where `register_notifications_protocol` is called *after* the network + // actually connects to other peers. For this reason, we delay the process of the network + // future until the user calls `NetworkStarter::start_network`. + // + // This entire hack should eventually be removed in favour of passing the list of protocols + // through the configuration. + // + // See also https://github.com/paritytech/substrate/issues/6827 + let (network_start_tx, network_start_rx) = oneshot::channel(); + // The network worker is responsible for gathering all network messages and processing // them. This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -1153,9 +1148,26 @@ where // issue, and ideally we would like to fix the network future to take as little time as // possible, but we also take the extra harm-prevention measure to execute the networking // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", Some("networking"), future); + spawn_handle.spawn_blocking("network-worker", Some("networking"), async move { + if network_start_rx.await.is_err() { + log::warn!( + "The NetworkStart returned as part of `build_network` has been silently dropped" + ); + // This `return` might seem unnecessary, but we don't want to make it look like + // everything is working as normal even though the user is clearly misusing the API. + return + } - Ok((network, system_rpc_tx, tx_handler_controller, sync_service.clone())) + future.await + }); + + Ok(( + network, + system_rpc_tx, + tx_handler_controller, + NetworkStarter(network_start_tx), + sync_service.clone(), + )) } /// Configuration for [`build_default_syncing_engine`]. 
@@ -1384,3 +1396,21 @@ where warp_sync_protocol_name, )?)) } + +/// Object used to start the network. +#[must_use] +pub struct NetworkStarter(oneshot::Sender<()>); + +impl NetworkStarter { + /// Create a new NetworkStarter + pub fn new(sender: oneshot::Sender<()>) -> Self { + NetworkStarter(sender) + } + + /// Start the network. Call this after all sub-components have been initialized. + /// + /// > **Note**: If you don't call this function, the networking will not work. + pub fn start_network(self) { + let _ = self.0.send(()); + } +} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index eddbb9260c05..ce5b92551bf2 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,8 +85,10 @@ use std::{ sync::Arc, }; -use super::call_executor::LocalCallExecutor; -use sp_core::traits::CodeExecutor; +#[cfg(feature = "test-helpers")] +use { + super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, +}; type NotificationSinks = Mutex>>; @@ -150,6 +152,39 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } + +/// Create an instance of in-memory client. +#[cfg(feature = "test-helpers")] +pub fn new_in_mem( + backend: Arc>, + executor: E, + genesis_block_builder: G, + prometheus_registry: Option, + telemetry: Option, + spawn_handle: Box, + config: ClientConfig, +) -> sp_blockchain::Result< + Client, LocalCallExecutor, E>, Block, RA>, +> +where + E: CodeExecutor + sc_executor::RuntimeVersionOf, + Block: BlockT, + G: BuildGenesisBlock< + Block, + BlockImportOperation = as backend::Backend>::BlockImportOperation, + >, +{ + new_with_backend( + backend, + executor, + genesis_block_builder, + spawn_handle, + prometheus_registry, + telemetry, + config, + ) +} + /// Client configuration items. 
#[derive(Debug, Clone)] pub struct ClientConfig { @@ -183,6 +218,7 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. +#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index 3020b3d296f4..ec77a92f162f 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,4 +56,5 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -pub use self::client::new_with_backend; +#[cfg(feature = "test-helpers")] +pub use self::client::{new_in_mem, new_with_backend}; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 2a3144a33e1a..ee4f4e7622e7 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,11 +23,14 @@ #![recursion_limit = "1024"] pub mod chain_ops; -pub mod client; pub mod config; pub mod error; mod builder; +#[cfg(feature = "test-helpers")] +pub mod client; +#[cfg(not(feature = "test-helpers"))] +mod client; mod metrics; mod task_manager; @@ -61,8 +64,8 @@ pub use self::{ new_client, new_db_backend, new_full_client, new_full_parts, new_full_parts_record_import, new_full_parts_with_genesis_builder, new_wasm_executor, propagate_transaction_notifications, spawn_tasks, BuildNetworkAdvancedParams, - BuildNetworkParams, DefaultSyncingEngineConfig, KeystoreContainer, SpawnTasksParams, - TFullBackend, TFullCallExecutor, TFullClient, + BuildNetworkParams, DefaultSyncingEngineConfig, KeystoreContainer, NetworkStarter, + SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, @@ -525,17 +528,13 @@ where }; let start = std::time::Instant::now(); - let pool = self.pool.clone(); - 
let client = self.client.clone(); + let import_future = self.pool.submit_one( + self.client.info().best_hash, + sc_transaction_pool_api::TransactionSource::External, + uxt, + ); Box::pin(async move { - match pool - .submit_one( - client.info().best_hash, - sc_transaction_pool_api::TransactionSource::External, - uxt, - ) - .await - { + match import_future.await { Ok(_) => { let elapsed = start.elapsed(); debug!(target: sc_transaction_pool::LOG_TARGET, "import transaction: {elapsed:?}"); @@ -596,8 +595,8 @@ mod tests { let transaction = Transfer { amount: 5, nonce: 0, - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), } .into_unchecked_extrinsic(); block_on(pool.submit_one(best.hash(), source, transaction.clone())).unwrap(); diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 45b2d8c5eea3..0edfc5b19314 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -15,13 +15,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { workspace = true, default-features = true } async-channel = { workspace = true } -codec = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } fdlimit = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } @@ -29,19 +31,17 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = 
true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } -tempfile = { workspace = true } -tokio = { features = ["time"], workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index ef5de93d64ca..55bbfcdd8594 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; +use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -48,8 +48,8 @@ use substrate_test_runtime_client::{ 
genesismap::{insert_genesis_block, GenesisStorageBuilder}, Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, }, - BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, Sr25519Keyring, - TestClientBuilder, TestClientBuilderExt, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; mod db; @@ -126,8 +126,8 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> Vec 1, genesis_hash, vec![Transfer { - from: Sr25519Keyring::One.into(), - to: Sr25519Keyring::Two.into(), + from: AccountKeyring::One.into(), + to: AccountKeyring::Two.into(), amount: 69 * DOLLARS, nonce: 0, }], @@ -158,7 +158,7 @@ fn finality_notification_check( fn construct_genesis_should_work_with_native() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -189,7 +189,7 @@ fn construct_genesis_should_work_with_native() { fn construct_genesis_should_work_with_wasm() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -223,14 +223,14 @@ fn client_initializes_from_genesis_ok() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) + .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) .unwrap(), 1000 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) .unwrap(), 0 * DOLLARS ); @@ 
-266,8 +266,8 @@ fn block_builder_works_with_transactions() { builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -301,14 +301,14 @@ fn block_builder_works_with_transactions() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) + .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) .unwrap(), 958 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) .unwrap(), 42 * DOLLARS ); @@ -325,8 +325,8 @@ fn block_builder_does_not_include_invalid() { builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -334,8 +334,8 @@ fn block_builder_does_not_include_invalid() { assert!(builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 30 * DOLLARS, nonce: 0, }) @@ -491,8 +491,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -531,8 +531,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: 
AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -549,8 +549,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -691,8 +691,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -732,8 +732,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -751,8 +751,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -982,8 +982,8 @@ fn finality_target_with_best_not_on_longest_chain() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, 
}) @@ -1134,8 +1134,8 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1195,8 +1195,8 @@ fn finalizing_diverged_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1303,8 +1303,8 @@ fn finality_notifications_content() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -1329,8 +1329,8 @@ fn finality_notifications_content() { .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1346,8 +1346,8 @@ fn finality_notifications_content() { // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1415,7 +1415,7 @@ fn state_reverted_on_reorg() { let current_balance = |client: &substrate_test_runtime_client::TestClient| { client .runtime_api() - .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) + .balance_of(client.chain_info().best_hash, 
AccountKeyring::Alice.into()) .unwrap() }; @@ -1428,8 +1428,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 10 * DOLLARS, nonce: 0, }) @@ -1443,8 +1443,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 50 * DOLLARS, nonce: 0, }) @@ -1460,8 +1460,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a2.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Charlie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), amount: 10 * DOLLARS, nonce: 1, }) @@ -1530,8 +1530,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1580,8 +1580,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure C1 gets a different hash from A1 and B1 c1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1788,8 +1788,8 @@ fn returns_status_for_pruned_blocks() { // b1 is created, but not imported b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build 
the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( + let mut client = new_in_mem::<_, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, - Box::new(TaskExecutor::new()), None, None, + Box::new(TaskExecutor::new()), client_config, ) .unwrap(); @@ -2191,8 +2191,8 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index c0219b294ced..e5087eae6eca 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -parity-db = { workspace = true } parking_lot = { workspace = true, default-features = true } +parity-db = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-statement-store = { workspace = true, default-features = true } -tokio = { features = ["time"], 
workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -sp-tracing = { workspace = true } tempfile = { workspace = true } +sp-tracing = { workspace = true } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 3d8cb72b1a92..c017184ced66 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] clap = { features = ["derive", "string"], workspace = true } -fs4 = { workspace = true } log = { workspace = true, default-features = true } +fs4 = { workspace = true } sp-core = { workspace = true, default-features = true } -thiserror = { workspace = true } tokio = { features = ["time"], workspace = true, default-features = true } +thiserror = { workspace = true } diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index 91c30f5aa2cc..cbab8f4d7b0d 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { 
workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -thiserror = { workspace = true } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index c7eed77eda7f..190e6e279b90 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -derive_more = { workspace = true, default-features = true } futures = { workspace = true } libc = { workspace = true } log = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } rand_pcg = { workspace = true } +derive_more = { workspace = true, default-features = true } regex = { workspace = true } -sc-telemetry = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 4a41a6b6deca..f87e8b66f731 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = { workspace = true } futures = { workspace = true } -libp2p = { features = ["dns", "tcp", "tokio", "websocket"], workspace = true } +libp2p = { features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } -rand = { workspace = true, 
default-features = true } -sc-network = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 2c8d424c4340..0bbdbfb622ef 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -18,13 +18,7 @@ use crate::TelemetryPayload; use futures::{channel::mpsc, prelude::*}; -use libp2p::{ - core::{ - transport::{DialOpts, PortUse, Transport}, - Endpoint, - }, - Multiaddr, -}; +use libp2p::{core::transport::Transport, Multiaddr}; use rand::Rng as _; use std::{ fmt, mem, @@ -235,10 +229,7 @@ where }, NodeSocket::ReconnectNow => { let addr = self.addr.clone(); - match self - .transport - .dial(addr, DialOpts { role: Endpoint::Dialer, port_use: PortUse::New }) - { + match self.transport.dial(addr) { Ok(d) => { log::trace!(target: "telemetry", "Re-dialing {}", self.addr); socket = NodeSocket::Dialing(d); diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index 949f6f6018ad..b8f5e40caf83 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -16,23 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -chrono = { workspace = true } -codec = { workspace = true, default-features = true } console = { workspace = true } is-terminal = { workspace = true } +chrono = { workspace = true } +codec = { workspace = true, default-features = true } libc = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } rustc-hash = { workspace = true } 
-sc-client-api = { workspace = true, default-features = true } -sc-tracing-proc-macro = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-rpc = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } thiserror = { workspace = true } tracing = { workspace = true, default-features = true } tracing-log = { workspace = true } @@ -40,6 +32,14 @@ tracing-subscriber = { workspace = true, features = [ "env-filter", "parking_lot", ] } +sc-client-api = { workspace = true, default-features = true } +sc-tracing-proc-macro = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [dev-dependencies] criterion = { workspace = true, default-features = true } diff --git a/substrate/client/tracing/src/logging/directives.rs b/substrate/client/tracing/src/logging/directives.rs index 811511bb20f5..a99e9c4c8909 100644 --- a/substrate/client/tracing/src/logging/directives.rs +++ b/substrate/client/tracing/src/logging/directives.rs @@ -40,7 +40,7 @@ pub(crate) fn add_default_directives(directives: &str) { add_directives(directives); } -/// Add directives to current directives. 
+/// Add directives to current directives pub fn add_directives(directives: &str) { CURRENT_DIRECTIVES .get_or_init(|| Mutex::new(Vec::new())) @@ -48,11 +48,6 @@ pub fn add_directives(directives: &str) { .push(directives.to_owned()); } -/// Returns the current directives. -pub fn get_directives() -> Vec { - CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone() -} - /// Parse `Directive` and add to default directives if successful. /// /// Ensures the supplied directive will be restored when resetting the log filter. diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 72586b984920..d346add93a64 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -25,11 +25,12 @@ itertools = { workspace = true } linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -37,9 +38,8 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { workspace = true, default-features = true, features = ["macros", 
"time"] } tokio-stream = { workspace = true } +tokio = { workspace = true, default-features = true, features = ["macros", "time"] } [dev-dependencies] array-bytes = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index 6671492a4e92..c55ee70b2cf5 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -17,10 +17,10 @@ codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true } sp-runtime = { workspace = true } -thiserror = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 6f771e9479bd..3ac1a79a0c28 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -23,7 +23,7 @@ pub mod error; use async_trait::async_trait; use codec::Codec; -use futures::Stream; +use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_core::offchain::TransactionPoolExt; use sp_runtime::traits::{Block as BlockT, Member}; @@ -208,6 +208,9 @@ pub type LocalTransactionFor

= <

::Block as BlockT> /// Transaction's index within the block in which it was included. pub type TxIndex = usize; +/// Typical future type used in transaction pool api. +pub type PoolFuture = std::pin::Pin> + Send>>; + /// In-pool transaction interface. /// /// The pool is container of transactions that are implementing this trait. @@ -235,7 +238,6 @@ pub trait InPoolTransaction { } /// Transaction pool interface. -#[async_trait] pub trait TransactionPool: Send + Sync { /// Block type. type Block: BlockT; @@ -251,40 +253,46 @@ pub trait TransactionPool: Send + Sync { // *** RPC - /// Asynchronously imports a bunch of unverified transactions to the pool. - async fn submit_at( + /// Returns a future that imports a bunch of unverified transactions to the pool. + fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> Result, Self::Error>>, Self::Error>; + ) -> PoolFuture, Self::Error>>, Self::Error>; - /// Asynchronously imports one unverified transaction to the pool. - async fn submit_one( + /// Returns a future that imports one unverified transaction to the pool. + fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result, Self::Error>; + ) -> PoolFuture, Self::Error>; - /// Asynchronously imports a single transaction and starts to watch their progress in the + /// Returns a future that imports a single transaction and starts to watch their progress in the /// pool. - async fn submit_and_watch( + fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result>>, Self::Error>; + ) -> PoolFuture>>, Self::Error>; // *** Block production / Networking /// Get an iterator for ready transactions ordered by priority. /// - /// Guaranteed to resolve only when transaction pool got updated at `at` block. - /// Guaranteed to resolve immediately when `None` is passed. - async fn ready_at( + /// Guarantees to return only when transaction pool got updated at `at` block. 
+ /// Guarantees to return immediately when `None` is passed. + fn ready_at( &self, at: ::Hash, - ) -> Box> + Send>; + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + >; /// Get an iterator for ready transactions ordered by priority. fn ready(&self) -> Box> + Send>; @@ -314,15 +322,22 @@ pub trait TransactionPool: Send + Sync { /// Return specific ready transaction by hash, if there is one. fn ready_transaction(&self, hash: &TxHash) -> Option>; - /// Asynchronously returns a set of ready transaction at given block within given timeout. + /// Returns set of ready transaction at given block within given timeout. /// - /// If the timeout is hit during method execution, then the best effort (without executing full - /// maintain process) set of ready transactions for given block is returned. - async fn ready_at_with_timeout( + /// If the timeout is hit during method execution then the best effort set of ready transactions + /// for given block, without executing full maintain process is returned. + fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> Box> + Send>; + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send + + '_, + >, + >; } /// An iterator of ready transactions. 
diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 5e40b0fb72d6..0d8c1cbba9b4 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -152,7 +152,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64, api: Arc) { - let source = TimedTransactionSource::new_external(false); + let source = TransactionSource::External; let mut futures = Vec::new(); let mut tags = Vec::new(); let at = HashAndNumber { @@ -171,7 +171,7 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&at, source.clone(), xt)); + futures.push(pool.submit_one(&at, source, xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 7679e3b169d2..2dd5836c570f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -24,7 +24,7 @@ use crate::{ common::log_xt::log_xt_trace, fork_aware_txpool::stream_map_util::next_event, - graph::{self, BlockHash, ExtrinsicHash}, + graph::{BlockHash, ChainApi, ExtrinsicHash}, LOG_TARGET, }; use futures::stream::StreamExt; @@ -33,44 +33,12 @@ use sc_transaction_pool_api::TransactionStatus; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; use std::{ - collections::{ - hash_map::{Entry, OccupiedEntry}, - HashMap, HashSet, - }, + collections::{hash_map::Entry, HashMap, HashSet}, fmt::{self, Debug, Formatter}, pin::Pin, }; use tokio_stream::StreamMap; -/// Represents a transaction that was removed from the transaction pool, including the reason of its -/// removal. 
-#[derive(Debug, PartialEq)] -pub struct DroppedTransaction { - /// Hash of the dropped extrinsic. - pub tx_hash: Hash, - /// Reason of the transaction being dropped. - pub reason: DroppedReason, -} - -impl DroppedTransaction { - fn new_usurped(tx_hash: Hash, by: Hash) -> Self { - Self { reason: DroppedReason::Usurped(by), tx_hash } - } - - fn new_enforced_by_limts(tx_hash: Hash) -> Self { - Self { reason: DroppedReason::LimitsEnforced, tx_hash } - } -} - -/// Provides reason of why transactions was dropped. -#[derive(Debug, PartialEq)] -pub enum DroppedReason { - /// Transaction was replaced by other transaction (e.g. because of higher priority). - Usurped(Hash), - /// Transaction was dropped because of internal pool limits being enforced. - LimitsEnforced, -} - /// Dropped-logic related event from the single view. pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>; @@ -79,8 +47,7 @@ type ViewStream = Pin> + Se /// Stream of extrinsic hashes that were dropped by the views and have no references by existing /// views. -pub(crate) type StreamOfDropped = - Pin>> + Send>>; +pub(crate) type StreamOfDropped = Pin> + Send>>; /// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`]. /// Used to send control commands from the [`MultiViewDroppedWatcherController`] to @@ -92,29 +59,38 @@ type Controller = mpsc::TracingUnboundedSender; type CommandReceiver = mpsc::TracingUnboundedReceiver; /// Commands to control the instance of dropped transactions stream [`StreamOfDropped`]. -enum Command +enum Command where - ChainApi: graph::ChainApi, + C: ChainApi, { /// Adds a new stream of dropped-related events originating in a view with a specific block /// hash - AddView(BlockHash, ViewStream), + AddView(BlockHash, ViewStream), /// Removes an existing view's stream associated with a specific block hash. - RemoveView(BlockHash), - /// Removes referencing views for given extrinsic hashes. 
+ RemoveView(BlockHash), + /// Adds initial views for given extrinsics hashes. + /// + /// This message should be sent when the external submission of a transaction occures. It + /// provides the list of initial views for given extrinsics hashes. + /// The dropped notification is not sent if it comes from the initial views. It allows to keep + /// transaction in the mempool, even if all the views are full at the time of submitting + /// transaction to the pool. + AddInitialViews(Vec>, BlockHash), + /// Removes all initial views for given extrinsic hashes. /// /// Intended to ba called on finalization. - RemoveFinalizedTxs(Vec>), + RemoveFinalizedTxs(Vec>), } -impl Debug for Command +impl Debug for Command where - ChainApi: graph::ChainApi, + C: ChainApi, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Command::AddView(..) => write!(f, "AddView"), Command::RemoveView(..) => write!(f, "RemoveView"), + Command::AddInitialViews(..) => write!(f, "AddInitialViews"), Command::RemoveFinalizedTxs(..) => write!(f, "RemoveFinalizedTxs"), } } @@ -125,114 +101,37 @@ where /// /// This struct maintains a mapping of active views and their corresponding streams, as well as the /// state of each transaction with respect to these views. -struct MultiViewDropWatcherContext +struct MultiViewDropWatcherContext where - ChainApi: graph::ChainApi, + C: ChainApi, { /// A map that associates the views identified by corresponding block hashes with their streams /// of dropped-related events. This map is used to keep track of active views and their event /// streams. - stream_map: StreamMap, ViewStream>, + stream_map: StreamMap, ViewStream>, /// A receiver for commands to control the state of the stream, allowing the addition and /// removal of views. This is used to dynamically update which views are being tracked. 
- command_receiver: CommandReceiver>, + command_receiver: CommandReceiver>, + /// For each transaction hash we keep the set of hashes representing the views that see this - /// transaction as ready or in_block. - /// - /// Even if all views referencing a ready transactions are removed, we still want to keep - /// transaction, there can be a fork which sees the transaction as ready. + /// transaction as ready or future. /// /// Once transaction is dropped, dropping view is removed from the set. - ready_transaction_views: HashMap, HashSet>>, - /// For each transaction hash we keep the set of hashes representing the views that see this - /// transaction as future. - /// - /// Once all views referencing a future transactions are removed, the future can be dropped. - /// - /// Once transaction is dropped, dropping view is removed from the set. - future_transaction_views: HashMap, HashSet>>, + transaction_states: HashMap, HashSet>>, - /// Transactions that need to be notified as dropped. - pending_dropped_transactions: Vec>, + /// The list of initial view for every extrinsic. + /// + /// Dropped notifications from initial views will be silenced. This allows to accept the + /// transaction into the mempool, even if all the views are full at the time of submitting new + /// transaction. + initial_views: HashMap, HashSet>>, } impl MultiViewDropWatcherContext where - C: graph::ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + C: ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { - /// Provides the ready or future `HashSet` containing views referencing given transaction. 
- fn transaction_views( - &mut self, - tx_hash: ExtrinsicHash, - ) -> Option, HashSet>>> { - if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash) - { - return Some(views_keeping_tx_valid) - } - if let Entry::Occupied(views_keeping_tx_valid) = - self.future_transaction_views.entry(tx_hash) - { - return Some(views_keeping_tx_valid) - } - None - } - - /// Processes the command and updates internal state accordingly. - fn handle_command(&mut self, cmd: Command) { - match cmd { - Command::AddView(key, stream) => { - trace!( - target: LOG_TARGET, - "dropped_watcher: Command::AddView {key:?} views:{:?}", - self.stream_map.keys().collect::>() - ); - self.stream_map.insert(key, stream); - }, - Command::RemoveView(key) => { - trace!( - target: LOG_TARGET, - "dropped_watcher: Command::RemoveView {key:?} views:{:?}", - self.stream_map.keys().collect::>() - ); - self.stream_map.remove(&key); - self.ready_transaction_views.iter_mut().for_each(|(tx_hash, views)| { - trace!( - target: LOG_TARGET, - "[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}", - tx_hash, - views - ); - views.remove(&key); - }); - - self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| { - trace!( - target: LOG_TARGET, - "[{:?}] dropped_watcher: Command::RemoveView future views: {:?}", - tx_hash, - views - ); - views.remove(&key); - if views.is_empty() { - self.pending_dropped_transactions.push(*tx_hash); - } - }); - }, - Command::RemoveFinalizedTxs(xts) => { - log_xt_trace!( - target: LOG_TARGET, - xts.clone(), - "[{:?}] dropped_watcher: finalized xt removed" - ); - xts.iter().for_each(|xt| { - self.ready_transaction_views.remove(xt); - self.future_transaction_views.remove(xt); - }); - }, - } - } - /// Processes a `ViewStreamEvent` from a specific view and updates the internal state /// accordingly. 
/// @@ -242,69 +141,49 @@ where &mut self, block_hash: BlockHash, event: ViewStreamEvent, - ) -> Option>> { + ) -> Option> { trace!( target: LOG_TARGET, - "dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ", - self.future_transaction_views.get(&event.0), - self.ready_transaction_views.get(&event.0), + "dropped_watcher: handle_event: event:{:?} views:{:?}, ", + event, self.stream_map.keys().collect::>(), ); let (tx_hash, status) = event; match status { - TransactionStatus::Future => { - self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); - }, - TransactionStatus::Ready | TransactionStatus::InBlock(..) => { - // note: if future transaction was once seens as the ready we may want to treat it - // as ready transactions. Unreferenced future transactions are more likely to be - // removed when the last referencing view is removed then ready transactions. - // Transcaction seen as ready is likely quite close to be included in some - // future fork. 
- if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { - views.insert(block_hash); - self.ready_transaction_views.insert(tx_hash, views); - } else { - self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); - } + TransactionStatus::Ready | TransactionStatus::Future => { + self.transaction_states.entry(tx_hash).or_default().insert(block_hash); }, - TransactionStatus::Dropped => { - if let Some(mut views_keeping_tx_valid) = self.transaction_views(tx_hash) { + TransactionStatus::Dropped | TransactionStatus::Usurped(_) => { + if let Entry::Occupied(mut views_keeping_tx_valid) = + self.transaction_states.entry(tx_hash) + { views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() { - return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + if views_keeping_tx_valid.get().is_empty() || + views_keeping_tx_valid + .get() + .iter() + .all(|h| !self.stream_map.contains_key(h)) + { + return self + .initial_views + .get(&tx_hash) + .map(|list| !list.contains(&block_hash)) + .unwrap_or(true) + .then(|| { + debug!("[{:?}] dropped_watcher: removing tx", tx_hash); + tx_hash + }) } } else { debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + return Some(tx_hash) } }, - TransactionStatus::Usurped(by) => - return Some(DroppedTransaction::new_usurped(tx_hash, by)), _ => {}, }; None } - /// Gets pending dropped transactions if any. - fn get_pending_dropped_transaction(&mut self) -> Option>> { - while let Some(tx_hash) = self.pending_dropped_transactions.pop() { - // never drop transaction that was seen as ready. It may not have a referencing - // view now, but such fork can appear. 
- if self.ready_transaction_views.get(&tx_hash).is_some() { - continue - } - - if let Some(views) = self.future_transaction_views.get(&tx_hash) { - if views.is_empty() { - self.future_transaction_views.remove(&tx_hash); - return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) - } - } - } - None - } - /// Creates a new `StreamOfDropped` and its associated event stream controller. /// /// This method initializes the internal structures and unfolds the stream of dropped @@ -321,29 +200,47 @@ where let ctx = Self { stream_map: StreamMap::new(), command_receiver, - ready_transaction_views: Default::default(), - future_transaction_views: Default::default(), - pending_dropped_transactions: Default::default(), + transaction_states: Default::default(), + initial_views: Default::default(), }; let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { - if let Some(dropped) = ctx.get_pending_dropped_transaction() { - debug!("dropped_watcher: sending out (pending): {dropped:?}"); - return Some((dropped, ctx)); - } tokio::select! { biased; + cmd = ctx.command_receiver.next() => { + match cmd? 
{ + Command::AddView(key,stream) => { + trace!(target: LOG_TARGET,"dropped_watcher: Command::AddView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); + ctx.stream_map.insert(key,stream); + }, + Command::RemoveView(key) => { + trace!(target: LOG_TARGET,"dropped_watcher: Command::RemoveView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); + ctx.stream_map.remove(&key); + }, + Command::AddInitialViews(xts,block_hash) => { + log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: xt initial view added {block_hash:?}"); + xts.into_iter().for_each(|xt| { + ctx.initial_views.entry(xt).or_default().insert(block_hash); + }); + }, + Command::RemoveFinalizedTxs(xts) => { + log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: finalized xt removed"); + xts.iter().for_each(|xt| { + ctx.initial_views.remove(xt); + ctx.transaction_states.remove(xt); + }); + + }, + } + }, + Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { debug!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } - }, - cmd = ctx.command_receiver.next() => { - ctx.handle_command(cmd?); } - } } }) @@ -357,30 +254,30 @@ where /// /// This struct provides methods to add and remove streams associated with views to and from the /// stream. -pub struct MultiViewDroppedWatcherController { +pub struct MultiViewDroppedWatcherController { /// A controller allowing to update the state of the associated [`StreamOfDropped`]. 
- controller: Controller>, + controller: Controller>, } -impl Clone for MultiViewDroppedWatcherController { +impl Clone for MultiViewDroppedWatcherController { fn clone(&self) -> Self { Self { controller: self.controller.clone() } } } -impl MultiViewDroppedWatcherController +impl MultiViewDroppedWatcherController where - ChainApi: graph::ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + C: ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Creates new [`StreamOfDropped`] and its controller. - pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { - let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); (Self { controller: ctrl }, stream_map.boxed()) } /// Notifies the [`StreamOfDropped`] that new view was created. - pub fn add_view(&self, key: BlockHash, view: ViewStream) { + pub fn add_view(&self, key: BlockHash, view: ViewStream) { let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); }); @@ -388,22 +285,40 @@ where /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the /// stream map. - pub fn remove_view(&self, key: BlockHash) { + pub fn remove_view(&self, key: BlockHash) { let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); }); } - /// Removes status info for finalized transactions. - pub fn remove_finalized_txs( + /// Adds the initial view for the given transactions hashes. + /// + /// This message should be called when the external submission of a transaction occures. It + /// provides the list of initial views for given extrinsics hashes. 
+ /// + /// The dropped notification is not sent if it comes from the initial views. It allows to keep + /// transaction in the mempool, even if all the views are full at the time of submitting + /// transaction to the pool. + pub fn add_initial_views( &self, - xts: impl IntoIterator> + Clone, + xts: impl IntoIterator> + Clone, + block_hash: BlockHash, ) { + let _ = self + .controller + .unbounded_send(Command::AddInitialViews(xts.into_iter().collect(), block_hash)) + .map_err(|e| { + trace!(target: LOG_TARGET, "dropped_watcher: add_initial_views_ send message failed: {e}"); + }); + } + + /// Removes all initial views for finalized transactions. + pub fn remove_finalized_txs(&self, xts: impl IntoIterator> + Clone) { let _ = self .controller .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) .map_err(|e| { - trace!(target: LOG_TARGET, "dropped_watcher: remove_finalized_txs send message failed: {e}"); + trace!(target: LOG_TARGET, "dropped_watcher: remove_initial_views send message failed: {e}"); }); } } @@ -433,7 +348,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash, view_stream); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + assert_eq!(handle.await.unwrap(), vec![tx_hash]); } #[tokio::test] @@ -483,10 +398,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!( - handle.await.unwrap(), - vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] - ); + assert_eq!(handle.await.unwrap(), vec![tx_hash1]); } #[tokio::test] @@ -511,11 +423,10 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); assert!(output_stream.next().now_or_never().is_none()); - watcher.remove_view(block_hash0); watcher.add_view(block_hash1, 
view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + assert_eq!(handle.await.unwrap(), vec![tx_hash]); } #[tokio::test] @@ -558,6 +469,65 @@ mod dropped_watcher_tests { let block_hash2 = H256::repeat_byte(0x03); watcher.add_view(block_hash2, view_stream2); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + assert_eq!(handle.await.unwrap(), vec![tx_hash]); + } + + #[tokio::test] + async fn test06() { + sp_tracing::try_init_simple(); + let (watcher, mut output_stream) = MultiViewDroppedWatcher::new(); + assert!(output_stream.next().now_or_never().is_none()); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash = H256::repeat_byte(0x0b); + + let view_stream0 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Future), + (tx_hash, TransactionStatus::InBlock((block_hash1, 0))), + ]) + .boxed(); + watcher.add_view(block_hash0, view_stream0); + assert!(output_stream.next().now_or_never().is_none()); + + let view_stream1 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + + watcher.add_view(block_hash1, view_stream1); + watcher.add_initial_views(vec![tx_hash], block_hash1); + assert!(output_stream.next().now_or_never().is_none()); + } + + #[tokio::test] + async fn test07() { + sp_tracing::try_init_simple(); + let (watcher, mut output_stream) = MultiViewDroppedWatcher::new(); + assert!(output_stream.next().now_or_never().is_none()); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash = H256::repeat_byte(0x0b); + + let view_stream0 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Future), + (tx_hash, 
TransactionStatus::InBlock((block_hash1, 0))), + ]) + .boxed(); + watcher.add_view(block_hash0, view_stream0); + watcher.add_initial_views(vec![tx_hash], block_hash0); + assert!(output_stream.next().now_or_never().is_none()); + + let view_stream1 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + watcher.add_view(block_hash1, view_stream1); + + let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); + assert_eq!(handle.await.unwrap(), vec![tx_hash]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ec87f1fefa4..7e72b44adf38 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,7 +23,7 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, view_store::ViewStore, }; @@ -31,13 +31,9 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, - graph::{ - self, - base_pool::{TimedTransactionSource, Transaction}, - ExtrinsicFor, ExtrinsicHash, IsValidator, Options, - }, - ReadyIteratorFor, LOG_TARGET, + fork_aware_txpool::revalidation_worker, + graph::{self, base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + PolledIterator, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; use futures::{ @@ -49,8 +45,9 @@ use futures::{ use parking_lot::Mutex; use 
prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + error::{Error, IntoPoolError}, + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, + TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -196,19 +193,13 @@ where listener.clone(), Default::default(), mempool_max_transactions_count, - ready_limits.total_bytes + future_limits.total_bytes, )); let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); - - let view_store = - Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); - let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), - view_store.clone(), import_notification_sink.clone(), ); @@ -225,8 +216,8 @@ where ( Self { mempool, - api: pool_api, - view_store, + api: pool_api.clone(), + view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -242,17 +233,14 @@ where ) } - /// Monitors the stream of dropped transactions and removes them from the mempool and - /// view_store. + /// Monitors the stream of dropped transactions and removes them from the mempool. /// /// This asynchronous task continuously listens for dropped transaction notifications provided /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` - /// and `import_notification_sink` instances. For Usurped events, the transaction is also - /// removed from the view_store. + /// and `import_notification_sink` instances. 
async fn dropped_monitor_task( mut dropped_stream: StreamOfDropped, mempool: Arc>, - view_store: Arc>, import_notification_sink: MultiViewImportNotificationSink< Block::Hash, ExtrinsicHash, @@ -263,33 +251,9 @@ where log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - let dropped_tx_hash = dropped.tx_hash; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); - match dropped.reason { - DroppedReason::Usurped(new_tx_hash) => { - if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { - view_store - .replace_transaction( - new_tx.source(), - new_tx.tx(), - dropped_tx_hash, - new_tx.is_watched(), - ) - .await; - } else { - log::trace!( - target:LOG_TARGET, - "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", - new_tx_hash, - ); - } - }, - DroppedReason::LimitsEnforced => {}, - }; - - mempool.remove_dropped_transaction(&dropped_tx_hash).await; - view_store.listener.transaction_dropped(dropped); - import_notification_sink.clean_notified_items(&[dropped_tx_hash]); + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); + mempool.remove_dropped_transactions(&[dropped]).await; + import_notification_sink.clean_notified_items(&[dropped]); } } @@ -319,18 +283,13 @@ where listener.clone(), metrics.clone(), TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count), - options.ready.total_bytes + options.future.total_bytes, )); let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); - - let view_store = - Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), - view_store.clone(), import_notification_sink.clone(), ); @@ -346,8 +305,8 @@ where Self { mempool, - api: pool_api, - view_store, + api: pool_api.clone(), + view_store: 
Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -406,16 +365,6 @@ where self.mempool.unwatched_and_watched_count() } - /// Returns a set of future transactions for given block hash. - /// - /// Intended for logging / tests. - pub fn futures_at( - &self, - at: Block::Hash, - ) -> Option, ExtrinsicFor>>> { - self.view_store.futures_at(at) - } - /// Returns a best-effort set of ready transactions for a given block, without executing full /// maintain process. /// @@ -425,13 +374,14 @@ where /// /// Pruning is just rebuilding the underlying transactions graph, no validations are executed, /// so this process shall be fast. - pub async fn ready_at_light(&self, at: Block::Hash) -> ReadyIteratorFor { + pub fn ready_at_light(&self, at: Block::Hash) -> PolledIterator { let start = Instant::now(); let api = self.api.clone(); log::trace!(target: LOG_TARGET, "fatp::ready_at_light {:?}", at); let Ok(block_number) = self.api.resolve_block_number(at) else { - return Box::new(std::iter::empty()) + let empty: ReadyIteratorFor = Box::new(std::iter::empty()); + return Box::pin(async { empty }) }; let best_result = { @@ -450,53 +400,57 @@ where ) }; - if let Ok((Some(best_tree_route), Some(best_view))) = best_result { - let tmp_view: View = - View::new_from_other(&best_view, &HashAndNumber { hash: at, number: block_number }); + Box::pin(async move { + if let Ok((Some(best_tree_route), Some(best_view))) = best_result { + let tmp_view: View = View::new_from_other( + &best_view, + &HashAndNumber { hash: at, number: block_number }, + ); - let mut all_extrinsics = vec![]; + let mut all_extrinsics = vec![]; - for h in best_tree_route.enacted() { - let extrinsics = api - .block_body(h.hash) - .await - .unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); - None - }) - 
.unwrap_or_default() + for h in best_tree_route.enacted() { + let extrinsics = api + .block_body(h.hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .map(|t| api.hash_and_length(&t).0); + all_extrinsics.extend(extrinsics); + } + + let before_count = tmp_view.pool.validated_pool().status().ready; + let tags = tmp_view + .pool + .validated_pool() + .extrinsics_tags(&all_extrinsics) .into_iter() - .map(|t| api.hash_and_length(&t).0); - all_extrinsics.extend(extrinsics); + .flatten() + .flatten() + .collect::>(); + let _ = tmp_view.pool.validated_pool().prune_tags(tags); + + let after_count = tmp_view.pool.validated_pool().status().ready; + log::debug!(target: LOG_TARGET, + "fatp::ready_at_light {} from {} before: {} to be removed: {} after: {} took:{:?}", + at, + best_view.at.hash, + before_count, + all_extrinsics.len(), + after_count, + start.elapsed() + ); + Box::new(tmp_view.pool.validated_pool().ready()) + } else { + let empty: ReadyIteratorFor = Box::new(std::iter::empty()); + log::debug!(target: LOG_TARGET, "fatp::ready_at_light {} -> empty, took:{:?}", at, start.elapsed()); + empty } - - let before_count = tmp_view.pool.validated_pool().status().ready; - let tags = tmp_view - .pool - .validated_pool() - .extrinsics_tags(&all_extrinsics) - .into_iter() - .flatten() - .flatten() - .collect::>(); - let _ = tmp_view.pool.validated_pool().prune_tags(tags); - - let after_count = tmp_view.pool.validated_pool().status().ready; - log::debug!(target: LOG_TARGET, - "fatp::ready_at_light {} from {} before: {} to be removed: {} after: {} took:{:?}", - at, - best_view.at.hash, - before_count, - all_extrinsics.len(), - after_count, - start.elapsed() - ); - Box::new(tmp_view.pool.validated_pool().ready()) - } else { - let empty: ReadyIteratorFor = Box::new(std::iter::empty()); - log::debug!(target: LOG_TARGET, "fatp::ready_at_light {} -> empty, 
took:{:?}", at, start.elapsed()); - empty - } + }) } /// Waits for the set of ready transactions for a given block up to a specified timeout. @@ -509,18 +463,18 @@ where /// maintain. /// /// Returns a future resolving to a ready iterator of transactions. - async fn ready_at_with_timeout_internal( + fn ready_at_with_timeout_internal( &self, at: Block::Hash, timeout: std::time::Duration, - ) -> ReadyIteratorFor { + ) -> PolledIterator { log::debug!(target: LOG_TARGET, "fatp::ready_at_with_timeout at {:?} allowed delay: {:?}", at, timeout); let timeout = futures_timer::Delay::new(timeout); let (view_already_exists, ready_at) = self.ready_at_internal(at); if view_already_exists { - return ready_at.await; + return ready_at; } let maybe_ready = async move { @@ -538,19 +492,18 @@ where }; let fall_back_ready = self.ready_at_light(at); - let (maybe_ready, fall_back_ready) = - futures::future::join(maybe_ready, fall_back_ready).await; - maybe_ready.unwrap_or(fall_back_ready) + Box::pin(async { + let (maybe_ready, fall_back_ready) = + futures::future::join(maybe_ready.boxed(), fall_back_ready.boxed()).await; + maybe_ready.unwrap_or(fall_back_ready) + }) } - fn ready_at_internal( - &self, - at: Block::Hash, - ) -> (bool, Pin> + Send>>) { + fn ready_at_internal(&self, at: Block::Hash) -> (bool, PolledIterator) { let mut ready_poll = self.ready_poll.lock(); if let Some((view, inactive)) = self.view_store.get_view_at(at, true) { - log::debug!(target: LOG_TARGET, "fatp::ready_at_internal {at:?} (inactive:{inactive:?})"); + log::debug!(target: LOG_TARGET, "fatp::ready_at {at:?} (inactive:{inactive:?})"); let iterator: ReadyIteratorFor = Box::new(view.pool.validated_pool().ready()); return (true, async move { iterator }.boxed()); } @@ -565,7 +518,7 @@ where }) .boxed(); log::debug!(target: LOG_TARGET, - "fatp::ready_at_internal {at:?} pending keys: {:?}", + "fatp::ready_at {at:?} pending keys: {:?}", ready_poll.pollers.keys() ); (false, pending) @@ -619,7 +572,6 @@ fn 
reduce_multiview_result(input: HashMap>>) -> Vec TransactionPool for ForkAwareTxPool where Block: BlockT, @@ -637,93 +589,131 @@ where /// /// The internal limits of the pool are checked. The results of submissions to individual views /// are reduced to single result. Refer to `reduce_multiview_result` for more details. - async fn submit_at( + fn submit_at( &self, _: ::Hash, source: TransactionSource, xts: Vec>, - ) -> Result, Self::Error>>, Self::Error> { + ) -> PoolFuture, Self::Error>>, Self::Error> { let view_store = self.view_store.clone(); log::debug!(target: LOG_TARGET, "fatp::submit_at count:{} views:{}", xts.len(), self.active_views_count()); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "[{:?}] fatp::submit_at"); let xts = xts.into_iter().map(Arc::from).collect::>(); - let mempool_results = self.mempool.extend_unwatched(source, &xts); + let mempool_result = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) + return future::ready(Ok(mempool_result)).boxed() } - let to_be_submitted = mempool_results - .iter() - .zip(xts) - .filter_map(|(result, xt)| { - result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) - }) - .collect::>(); + let (hashes, to_be_submitted): (Vec>, Vec>) = + mempool_result + .iter() + .zip(xts) + .filter_map(|(result, xt)| result.as_ref().ok().map(|xt_hash| (xt_hash, xt))) + .unzip(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); let mempool = self.mempool.clone(); - let results_map = view_store.submit(to_be_submitted.into_iter()).await; - let mut submission_results = reduce_multiview_result(results_map).into_iter(); + async move { + let results_map = view_store.submit(source, to_be_submitted.into_iter(), hashes).await; + let mut submission_results = reduce_multiview_result(results_map).into_iter(); - Ok(mempool_results + Ok(mempool_result 
.into_iter() .map(|result| { - result.and_then(|insertion| { - submission_results + result.and_then(|xt_hash| { + let result = submission_results .next() - .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") - .inspect_err(|_| - mempool.remove(insertion.hash) - ) + .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed."); + result.or_else(|error| { + let error = error.into_pool_error(); + match error { + Ok( + // The transaction is still in mempool it may get included into the view for the next block. + Error::ImmediatelyDropped + ) => Ok(xt_hash), + Ok(e) => { + mempool.remove(xt_hash); + Err(e.into()) + }, + Err(e) => Err(e), + } + }) }) }) .collect::>()) + } + .boxed() } /// Submits a single transaction and returns a future resolving to the submission results. /// /// Actual transaction submission process is delegated to the `submit_at` function. - async fn submit_one( + fn submit_one( &self, _at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result, Self::Error> { + ) -> PoolFuture, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_one views:{}", self.tx_hash(&xt), self.active_views_count()); - match self.submit_at(_at, source, vec![xt]).await { - Ok(mut v) => - v.pop().expect("There is exactly one element in result of submit_at. qed."), - Err(e) => Err(e), + let result_future = self.submit_at(_at, source, vec![xt]); + async move { + let result = result_future.await; + match result { + Ok(mut v) => + v.pop().expect("There is exactly one element in result of submit_at. qed."), + Err(e) => Err(e), + } } + .boxed() } /// Submits a transaction and starts to watch its progress in the pool, returning a stream of /// status updates. /// /// Actual transaction submission process is delegated to the `ViewStore` internal instance. 
- async fn submit_and_watch( + fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result>>, Self::Error> { + ) -> PoolFuture>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: timed_source } = - match self.mempool.push_watched(source, xt.clone()) { - Ok(result) => result, - Err(e) => return Err(e), - }; + let xt_hash = match self.mempool.push_watched(source, xt.clone()) { + Ok(xt_hash) => xt_hash, + Err(e) => return future::ready(Err(e)).boxed(), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - self.view_store - .submit_and_watch(at, timed_source, xt) - .await - .inspect_err(|_| self.mempool.remove(xt_hash)) + let view_store = self.view_store.clone(); + let mempool = self.mempool.clone(); + async move { + let result = view_store.submit_and_watch(at, source, xt).await; + let result = result.or_else(|(e, maybe_watcher)| { + let error = e.into_pool_error(); + match (error, maybe_watcher) { + ( + Ok( + // The transaction is still in mempool it may get included into the + // view for the next block. + Error::ImmediatelyDropped, + ), + Some(watcher), + ) => Ok(watcher), + (Ok(e), _) => { + mempool.remove(xt_hash); + Err(e.into()) + }, + (Err(e), _) => Err(e), + } + }); + result + } + .boxed() } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -794,9 +784,9 @@ where } /// Returns an iterator for ready transactions at a specific block, ordered by priority. - async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { + fn ready_at(&self, at: ::Hash) -> PolledIterator { let (_, result) = self.ready_at_internal(at); - result.await + result } /// Returns an iterator for ready transactions, ordered by priority. 
@@ -819,12 +809,12 @@ where /// /// If the timeout expires before the maintain process is accomplished, a best-effort /// set of transactions is returned (refer to `ready_at_light`). - async fn ready_at_with_timeout( + fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> ReadyIteratorFor { - self.ready_at_with_timeout_internal(at, timeout).await + ) -> PolledIterator { + self.ready_at_with_timeout_internal(at, timeout) } } @@ -852,12 +842,12 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, .. } = self + let result = self .mempool .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + self.view_store.submit_local(xt).or_else(|_| Ok(result)) } } @@ -965,9 +955,6 @@ where let start = Instant::now(); let watched_xts = self.register_listeners(&mut view).await; let duration = start.elapsed(); - // sync the transactions statuses and referencing views in all the listeners with newly - // cloned view. - view.pool.validated_pool().retrigger_notifications(); log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); // 2. Handle transactions from the tree route. Pruning transactions from the view first @@ -1069,7 +1056,7 @@ where future::join_all(results).await } - /// Updates the given view with the transactions from the internal mempol. + /// Updates the given view with the transaction from the internal mempol. 
/// /// All transactions from the mempool (excluding those which are either already imported or /// already included in blocks since recently finalized block) are submitted to the @@ -1095,43 +1082,69 @@ where self.active_views_count() ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; + let xts = self.mempool.clone_unwatched(); + + let mut all_submitted_count = 0; + if !xts.is_empty() { + let unwatched_count = xts.len(); + let mut buckets = HashMap::>>::default(); + xts.into_iter() + .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) + .filter(|(hash, _)| !included_xts.contains(&hash)) + .map(|(_, tx)| (tx.source(), tx.tx())) + .for_each(|(source, tx)| buckets.entry(source).or_default().push(tx)); + + for (source, xts) in buckets { + all_submitted_count += xts.len(); + let _ = view.submit_many(source, xts).await; + } + log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); + } - let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts - .into_iter() - .chain(self.mempool.clone_unwatched().into_iter()) - .filter(|(hash, _)| !view.is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) - .unzip(); + let watched_submitted_count = watched_xts.len(); - let watched_results = view - .submit_many(xts_filtered) - .await + let mut buckets = HashMap::< + TransactionSource, + Vec<(ExtrinsicHash, ExtrinsicFor)>, + >::default(); + watched_xts .into_iter() - .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) - .collect::>(); + .filter(|(hash, _)| !included_xts.contains(&hash)) + .map(|(tx_hash, tx)| (tx.source(), tx_hash, tx.tx())) + .for_each(|(source, tx_hash, tx)| { + buckets.entry(source).or_default().push((tx_hash, tx)) + }); - let submitted_count = watched_results.len(); + let mut watched_results = Vec::default(); + for (source, 
watched_xts) in buckets { + let hashes = watched_xts.iter().map(|i| i.0).collect::>(); + let results = view + .submit_many(source, watched_xts.into_iter().map(|i| i.1)) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); + watched_results.extend(results); + } - log::debug!( - target: LOG_TARGET, - "update_view_with_mempool: at {:?} submitted {}/{}", - view.at.hash, - submitted_count, - self.mempool.len() - ); + log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); - self.metrics - .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); + all_submitted_count += watched_submitted_count; + let _ = all_submitted_count + .try_into() + .map(|v| self.metrics.report(|metrics| metrics.submitted_from_mempool_txs.inc_by(v))); // if there are no views yet, and a single newly created view is reporting error, just send // out the invalid event, and remove transaction. if self.view_store.is_empty() { for result in watched_results { - if let Err(tx_hash) = result { - self.view_store.listener.invalidate_transactions(&[tx_hash]); - self.mempool.remove(tx_hash); + match result { + Err(tx_hash) => { + self.view_store.listener.invalidate_transactions(&[tx_hash]); + self.mempool.remove(tx_hash); + }, + Ok(_) => {}, } } } @@ -1207,14 +1220,7 @@ where }) .map(|(tx_hash, tx)| { //find arc if tx is known - self.mempool - .get_by_hash(tx_hash) - .map(|tx| (tx.source(), tx.tx())) - .unwrap_or_else(|| { - // These transactions are coming from retracted blocks, we - // should simply consider them external. 
- (TimedTransactionSource::new_external(true), Arc::from(tx)) - }) + self.mempool.get_by_hash(tx_hash).unwrap_or_else(|| Arc::from(tx)) }), ); @@ -1223,7 +1229,16 @@ where }); } - let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + let _ = view + .pool + .resubmit_at( + &hash_and_number, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index f9a41673bb8f..7fbdcade63b8 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -326,7 +326,6 @@ mod tests { let j0 = tokio::spawn(runnable); let stream = ctrl.event_stream(); - let stream2 = ctrl.event_stream(); let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); @@ -343,16 +342,20 @@ mod tests { ctrl.add_view(1000, o1); ctrl.add_view(2000, o2); - let out = stream.take(4).collect::>().await; - assert_eq!(out, vec![1, 2, 3, 6]); + let j4 = { + let ctrl = ctrl.clone(); + tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(70)).await; + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + }) + }; - ctrl.clean_notified_items(&vec![1, 3]); - ctrl.add_view(3000, o3.boxed()); - let out = stream2.take(6).collect::>().await; + let out = stream.take(6).collect::>().await; assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); - drop(ctrl); - futures::future::join_all(vec![j0, j1, j2, j3]).await; + + futures::future::join_all(vec![j0, j1, j2, j3, j4]).await; } #[tokio::test] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs 
b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs index 5f7294a24fd7..9f979e216b6d 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs @@ -201,12 +201,12 @@ //! required to accomplish it. //! //! ### Providing ready transactions: `ready_at` -//! The asynchronous [`ready_at`] function resolves to the [ready transactions -//! iterator][`ReadyTransactions`]. The block builder shall wait either for the future to be -//! resolved or for timeout to be hit. To avoid building empty blocks in case of timeout, the -//! waiting for timeout functionality was moved into the transaction pool, and new API function was -//! added: [`ready_at_with_timeout`]. This function also provides a fall back ready iterator which -//! is result of [light maintain](#light-maintain). +//! The [`ready_at`] function returns a [future][`crate::PolledIterator`] that resolves to the +//! [ready transactions iterator][`ReadyTransactions`]. The block builder shall wait either for the +//! future to be resolved or for timeout to be hit. To avoid building empty blocks in case of +//! timeout, the waiting for timeout functionality was moved into the transaction pool, and new API +//! function was added: [`ready_at_with_timeout`]. This function also provides a fall back ready +//! iterator which is result of [light maintain](#light-maintain). //! //! New function internally waits either for [maintain](#maintain) process triggered for requested //! block to be accomplished or for the timeout. 
If timeout hits then the result of [light diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index a00234a99808..8d0e69db2e9a 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -36,8 +36,6 @@ use std::{ }; use tokio_stream::StreamMap; -use super::dropped_watcher::{DroppedReason, DroppedTransaction}; - /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. /// @@ -81,7 +79,7 @@ enum ControllerCommand { /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped(DroppedReason>), + TransactionDropped, } impl std::fmt::Debug for ControllerCommand @@ -101,8 +99,8 @@ where ControllerCommand::TransactionBroadcasted(_) => { write!(f, "ListenerAction::TransactionBroadcasted(...)") }, - ControllerCommand::TransactionDropped(r) => { - write!(f, "ListenerAction::TransactionDropped {r:?}") + ControllerCommand::TransactionDropped => { + write!(f, "ListenerAction::TransactionDropped") }, } } @@ -270,7 +268,6 @@ where /// stream map. fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); - self.views_keeping_tx_valid.remove(&block_hash); trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); } } @@ -285,11 +282,6 @@ where Self { controllers: Default::default() } } - /// Returns `true` if the listener contains a stream controller for the specified hash. - pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { - self.controllers.read().contains_key(tx_hash) - } - /// Creates an external aggregated stream of events for given transaction. 
/// /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets @@ -354,16 +346,11 @@ where log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); return Some((TransactionStatus::Broadcast(peers), ctx)) }, - ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { + ControllerCommand::TransactionDropped => { log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, - ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); - ctx.terminate = true; - return Some((TransactionStatus::Usurped(by), ctx)) - }, } }, }; @@ -458,15 +445,16 @@ where /// /// This method sends a `TransactionDropped` command to the controller of each requested /// transaction prompting and external `Broadcasted` event. - pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { + pub(crate) fn transactions_dropped(&self, dropped: &[ExtrinsicHash]) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); - if let Some(tx) = controllers.remove(&dropped.tx_hash) { - let DroppedTransaction { tx_hash, reason } = dropped; - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { - trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); - }; + debug!(target: LOG_TARGET, "mvl::transactions_dropped: {:?}", dropped); + for tx_hash in dropped { + if let Some(tx) = controllers.remove(&tx_hash) { + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped) { + trace!(target: LOG_TARGET, "[{:?}] transactions_dropped: send message 
failed: {:?}", tx_hash, e); + }; + } } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index e1c65a08a70b..9464ab3f5766 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -186,11 +186,11 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, - TimedTransactionSource, }; use futures::executor::block_on; + use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::Sr25519Keyring::Alice; + use substrate_test_runtime_client::AccountKeyring::Alice; #[test] fn revalidation_queue_works() { let api = Arc::new(TestApi::default()); @@ -212,10 +212,9 @@ mod tests { nonce: 0, }); - let _ = block_on(view.submit_many(std::iter::once(( - TimedTransactionSource::new_external(false), - uxt.clone().into(), - )))); + let _ = block_on( + view.submit_many(TransactionSource::External, std::iter::once(uxt.clone().into())), + ); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 989ae4425dc4..989c7e8ef356 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -30,11 +30,12 @@ use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::Mult use crate::{ common::log_xt::log_xt_trace, graph, - graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + graph::{ExtrinsicFor, ExtrinsicHash}, LOG_TARGET, }; use 
futures::FutureExt; use itertools::Itertools; +use parking_lot::RwLock; use sc_transaction_pool_api::TransactionSource; use sp_blockchain::HashAndNumber; use sp_runtime::{ @@ -42,7 +43,7 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ - collections::HashMap, + collections::{hash_map::Entry, HashMap}, sync::{atomic, atomic::AtomicU64, Arc}, time::Instant, }; @@ -71,10 +72,8 @@ where watched: bool, /// Extrinsic actual body. tx: ExtrinsicFor, - /// Size of the extrinsics actual body. - bytes: usize, /// Transaction source. - source: TimedTransactionSource, + source: TransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, //todo: we need to add future / ready status at finalized block. @@ -95,30 +94,18 @@ where /// Shall the progress of transaction be watched. /// /// Was transaction sent with `submit_and_watch`. - pub(crate) fn is_watched(&self) -> bool { + fn is_watched(&self) -> bool { self.watched } /// Creates a new instance of wrapper for unwatched transaction. - fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: false, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor) -> Self { + Self { watched: false, tx, source, validated_at: AtomicU64::new(0) } } /// Creates a new instance of wrapper for watched transaction. 
- fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: true, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + fn new_watched(source: TransactionSource, tx: ExtrinsicFor) -> Self { + Self { watched: true, tx, source, validated_at: AtomicU64::new(0) } } /// Provides a clone of actual transaction body. @@ -129,23 +116,15 @@ where } /// Returns the source of the transaction. - pub(crate) fn source(&self) -> TimedTransactionSource { - self.source.clone() - } -} - -impl Size for Arc> -where - Block: BlockT, - ChainApi: graph::ChainApi + 'static, -{ - fn size(&self) -> usize { - self.bytes + pub(crate) fn source(&self) -> TransactionSource { + self.source } } type InternalTxMemPoolMap = - graph::tracked_map::TrackedMap, Arc>>; + HashMap, Arc>>; +type InternalTxMemPoolMapEntry<'a, ChainApi, Block> = + Entry<'a, ExtrinsicHash, Arc>>; /// An intermediary transactions buffer. /// @@ -174,29 +153,13 @@ where /// /// The key is the hash of the transaction, and the value is a wrapper /// structure, which contains the mempool specific details of the transaction. - transactions: InternalTxMemPoolMap, + transactions: RwLock>, /// Prometheus's metrics endpoint. metrics: PrometheusMetrics, /// Indicates the maximum number of transactions that can be maintained in the memory pool. max_transactions_count: usize, - - /// Maximal size of encodings of all transactions in the memory pool. - max_transactions_total_bytes: usize, -} - -/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. 
-#[derive(Debug)] -pub(super) struct InsertionInfo { - pub(super) hash: Hash, - pub(super) source: TimedTransactionSource, -} - -impl InsertionInfo { - fn new(hash: Hash, source: TimedTransactionSource) -> Self { - Self { hash, source } - } } impl TxMemPool @@ -212,32 +175,19 @@ where listener: Arc>, metrics: PrometheusMetrics, max_transactions_count: usize, - max_transactions_total_bytes: usize, ) -> Self { - Self { - api, - listener, - transactions: Default::default(), - metrics, - max_transactions_count, - max_transactions_total_bytes, - } + Self { api, listener, transactions: Default::default(), metrics, max_transactions_count } } /// Creates a new `TxMemPool` instance for testing purposes. #[allow(dead_code)] - fn new_test( - api: Arc, - max_transactions_count: usize, - max_transactions_total_bytes: usize, - ) -> Self { + fn new_test(api: Arc, max_transactions_count: usize) -> Self { Self { api, listener: Arc::from(MultiViewListener::new()), transactions: Default::default(), metrics: Default::default(), max_transactions_count, - max_transactions_total_bytes, } } @@ -245,57 +195,37 @@ where pub(super) fn get_by_hash( &self, hash: ExtrinsicHash, - ) -> Option>> { - self.transactions.read().get(&hash).map(Clone::clone) + ) -> Option> { + self.transactions.read().get(&hash).map(|t| t.tx()) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. - pub fn unwatched_and_watched_count(&self) -> (usize, usize) { + pub(super) fn unwatched_and_watched_count(&self) -> (usize, usize) { let transactions = self.transactions.read(); let watched_count = transactions.values().filter(|t| t.is_watched()).count(); (transactions.len() - watched_count, watched_count) } - /// Returns a total number of transactions kept within mempool. - pub fn len(&self) -> usize { - self.transactions.read().len() - } - - /// Returns the number of bytes used by all extrinsics in the the pool. 
- #[cfg(test)] - pub fn bytes(&self) -> usize { - return self.transactions.bytes() - } - - /// Returns true if provided values would exceed defined limits. - fn is_limit_exceeded(&self, length: usize, current_total_bytes: usize) -> bool { - length > self.max_transactions_count || - current_total_bytes > self.max_transactions_total_bytes - } - /// Attempts to insert a transaction into the memory pool, ensuring it does not /// exceed the maximum allowed transaction count. fn try_insert( &self, + current_len: usize, + entry: InternalTxMemPoolMapEntry<'_, ChainApi, Block>, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result>, ChainApi::Error> { - let bytes = self.transactions.bytes(); - let mut transactions = self.transactions.write(); - let result = match ( - !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), - transactions.contains_key(&hash), - ) { - (true, false) => { - let source = tx.source(); - transactions.insert(hash, Arc::from(tx)); - Ok(InsertionInfo::new(hash, source)) + ) -> Result, ChainApi::Error> { + //todo: obey size limits [#5476] + let result = match (current_len < self.max_transactions_count, entry) { + (true, Entry::Vacant(v)) => { + v.insert(Arc::from(tx)); + Ok(hash) }, - (_, true) => + (_, Entry::Occupied(_)) => Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result); result } @@ -308,12 +238,18 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec>, ChainApi::Error>> { + ) -> Vec, ChainApi::Error>> { + let mut transactions = self.transactions.write(); let result = xts .iter() .map(|xt| { - let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, 
xt.clone(), length)) + let hash = self.api.hash_and_length(&xt).0; + self.try_insert( + transactions.len(), + transactions.entry(hash), + hash, + TxInMemPool::new_unwatched(source, xt.clone()), + ) }) .collect::>(); result @@ -325,18 +261,31 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result>, ChainApi::Error> { - let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) - } - - /// Removes transaction from the memory pool which are specified by the given list of hashes. - pub(super) async fn remove_dropped_transaction( + ) -> Result, ChainApi::Error> { + let mut transactions = self.transactions.write(); + let hash = self.api.hash_and_length(&xt).0; + self.try_insert( + transactions.len(), + transactions.entry(hash), + hash, + TxInMemPool::new_watched(source, xt.clone()), + ) + } + + /// Removes transactions from the memory pool which are specified by the given list of hashes + /// and send the `Dropped` event to the listeners of these transactions. 
+ pub(super) async fn remove_dropped_transactions( &self, - dropped: &ExtrinsicHash, - ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(dropped) + to_be_removed: &[ExtrinsicHash], + ) { + log::debug!(target: LOG_TARGET, "remove_dropped_transactions count:{:?}", to_be_removed.len()); + log_xt_trace!(target: LOG_TARGET, to_be_removed, "[{:?}] mempool::remove_dropped_transactions"); + let mut transactions = self.transactions.write(); + to_be_removed.iter().for_each(|t| { + transactions.remove(t); + }); + + self.listener.transactions_dropped(to_be_removed); } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -375,11 +324,12 @@ where let start = Instant::now(); let (count, input) = { - let transactions = self.transactions.clone_map(); + let transactions = self.transactions.read(); ( transactions.len(), transactions + .clone() .into_iter() .filter(|xt| { let finalized_block_number = finalized_block.number.into().as_u64(); @@ -393,13 +343,13 @@ where }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { - self.api - .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) - .map(move |validation_result| { + self.api.validate_transaction(finalized_block.hash, xt.source, xt.tx()).map( + move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); (xt_hash, validation_result) - }) + }, + ) }); let validation_results = futures::future::join_all(validations_futures).await; let input_len = validation_results.len(); @@ -427,7 +377,7 @@ where log::debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} purged:{} took {duration:?}", invalid_hashes.len(), ); invalid_hashes @@ 
-467,9 +417,9 @@ where #[cfg(test)] mod tx_mem_pool_tests { use super::*; - use crate::{common::tests::TestApi, graph::ChainApi}; - use substrate_test_runtime::{AccountId, Extrinsic, ExtrinsicBuilder, Transfer, H256}; - use substrate_test_runtime_client::Sr25519Keyring::*; + use crate::common::tests::TestApi; + use substrate_test_runtime::{AccountId, Extrinsic, Transfer, H256}; + use substrate_test_runtime_client::AccountKeyring::*; fn uxt(nonce: u64) -> Extrinsic { crate::common::tests::uxt(Transfer { from: Alice.into(), @@ -483,7 +433,7 @@ mod tx_mem_pool_tests { fn extend_unwatched_obeys_limit() { let max = 10; let api = Arc::from(TestApi::default()); - let mempool = TxMemPool::new_test(api, max, usize::MAX); + let mempool = TxMemPool::new_test(api, max); let xts = (0..max + 1).map(|x| Arc::from(uxt(x as _))).collect::>(); @@ -500,7 +450,7 @@ mod tx_mem_pool_tests { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); - let mempool = TxMemPool::new_test(api, max, usize::MAX); + let mempool = TxMemPool::new_test(api, max); let mut xts = (0..max - 1).map(|x| Arc::from(uxt(x as _))).collect::>(); xts.push(xts.iter().last().unwrap().clone()); @@ -517,7 +467,7 @@ mod tx_mem_pool_tests { fn push_obeys_limit() { let max = 10; let api = Arc::from(TestApi::default()); - let mempool = TxMemPool::new_test(api, max, usize::MAX); + let mempool = TxMemPool::new_test(api, max); let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); @@ -542,7 +492,7 @@ mod tx_mem_pool_tests { fn push_detects_already_imported() { let max = 10; let api = Arc::from(TestApi::default()); - let mempool = TxMemPool::new_test(api, 2 * max, usize::MAX); + let mempool = TxMemPool::new_test(api, 2 * max); let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); let xt0 = xts.iter().last().unwrap().clone(); @@ -567,7 +517,7 @@ mod tx_mem_pool_tests { fn count_works() { let max = 100; let api = Arc::from(TestApi::default()); - let mempool = 
TxMemPool::new_test(api, max, usize::MAX); + let mempool = TxMemPool::new_test(api, max); let xts0 = (0..10).map(|x| Arc::from(uxt(x as _))).collect::>(); @@ -582,39 +532,4 @@ mod tx_mem_pool_tests { assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); } - - fn large_uxt(x: usize) -> Extrinsic { - ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() - } - - #[test] - fn push_obeys_size_limit() { - sp_tracing::try_init_simple(); - let max = 10; - let api = Arc::from(TestApi::default()); - //size of large extrinsic is: 1129 - let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129); - - let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); - - let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - - let results = mempool.extend_unwatched(TransactionSource::External, &xts); - assert!(results.iter().all(Result::is_ok)); - assert_eq!(mempool.bytes(), total_xts_bytes); - - let xt = Arc::from(large_uxt(98)); - let result = mempool.push_watched(TransactionSource::External, xt); - assert!(matches!( - result.unwrap_err(), - sc_transaction_pool_api::error::Error::ImmediatelyDropped - )); - - let xt = Arc::from(large_uxt(99)); - let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); - assert!(matches!( - result.pop().unwrap().unwrap_err(), - sc_transaction_pool_api::error::Error::ImmediatelyDropped - )); - } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 3cbb8fa4871d..99095d88cb0a 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -27,13 +27,13 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ common::log_xt::log_xt_trace, graph::{ - self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, - 
IsValidator, ValidatedTransaction, ValidatedTransactionFor, + self, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedTransaction, + ValidatedTransactionFor, }, LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, @@ -157,21 +157,22 @@ where /// Imports many unvalidated extrinsics into the view. pub(super) async fn submit_many( &self, - xts: impl IntoIterator)>, + source: TransactionSource, + xts: impl IntoIterator>, ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); - self.pool.submit_at(&self.at, xts).await + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, source, xts).await } else { - self.pool.submit_at(&self.at, xts).await + self.pool.submit_at(&self.at, source, xts).await } } /// Import a single extrinsic and starts to watch its progress in the view. pub(super) async fn submit_and_watch( &self, - source: TimedTransactionSource, + source: TransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); @@ -192,7 +193,7 @@ where .api() .validate_transaction_blocking( self.at.hash, - sc_transaction_pool_api::TransactionSource::Local, + TransactionSource::Local, Arc::from(xt.clone()), )? 
.map_err(|e| { @@ -213,7 +214,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TimedTransactionSource::new_local(true), + TransactionSource::Local, Arc::from(xt), length, validity, @@ -284,7 +285,7 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); + let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -323,7 +324,7 @@ where ValidatedTransaction::valid_at( self.at.number.saturated_into::(), tx_hash, - tx.source.clone(), + tx.source, tx.data.clone(), api.hash_and_length(&tx.data).1, validity, @@ -454,10 +455,4 @@ where ); } } - - /// Returns true if the transaction with given hash is already imported into the view. 
- pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { - const IGNORE_BANNED: bool = false; - self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() - } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index a06c051f0a7e..413fca223242 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -24,51 +24,17 @@ use super::{ }; use crate::{ fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, - graph::{ - self, - base_pool::{TimedTransactionSource, Transaction}, - ExtrinsicFor, ExtrinsicHash, TransactionFor, - }, + graph, + graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, TransactionFor}, ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; use itertools::Itertools; use parking_lot::RwLock; -use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, - time::Instant, -}; - -/// Helper struct to keep the context for transaction replacements. -#[derive(Clone)] -struct PendingTxReplacement -where - ChainApi: graph::ChainApi, -{ - /// Indicates if the new transaction was already submitted to all the views in the view_store. - /// If true, it can be removed after inserting any new view. - processed: bool, - /// New transaction replacing the old one. - xt: ExtrinsicFor, - /// Source of the transaction. - source: TimedTransactionSource, - /// Inidicates if transaction is watched. - watched: bool, -} - -impl PendingTxReplacement -where - ChainApi: graph::ChainApi, -{ - /// Creates new unprocessed instance of pending transaction replacement. 
- fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { - Self { processed: false, xt, source, watched } - } -} +use std::{collections::HashMap, sync::Arc, time::Instant}; /// The helper structure encapsulates all the views. pub(super) struct ViewStore @@ -96,13 +62,6 @@ where pub(super) most_recent_view: RwLock>, /// The controller of multi view dropped stream. pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, - /// The map used to synchronize replacement of transactions between maintain and dropped - /// notifcication threads. It is meant to assure that replaced transaction is also removed from - /// newly built views in maintain process. - /// - /// The map's key is hash of replaced extrinsic. - pending_txs_replacements: - RwLock, PendingTxReplacement>>, } impl ViewStore @@ -124,14 +83,15 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, - pending_txs_replacements: Default::default(), } } /// Imports a bunch of unverified extrinsics to every active view. 
pub(super) async fn submit( &self, - xts: impl IntoIterator)> + Clone, + source: TransactionSource, + xts: impl IntoIterator> + Clone, + xts_hashes: impl IntoIterator> + Clone, ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); @@ -140,7 +100,9 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(xts).await) } + self.dropped_stream_controller + .add_initial_views(xts_hashes.clone(), view.at.hash); + async move { (view.at.hash, view.submit_many(source, xts.clone()).await) } }) .collect::>() }; @@ -165,7 +127,11 @@ where let result = active_views .iter() - .map(|view| view.submit_local(xt.clone())) + .map(|view| { + self.dropped_stream_controller + .add_initial_views(std::iter::once(tx_hash), view.at.hash); + view.submit_local(xt.clone()) + }) .find_or_first(Result::is_ok); if let Some(Err(err)) = result { @@ -186,12 +152,12 @@ where pub(super) async fn submit_and_watch( &self, _at: Block::Hash, - source: TimedTransactionSource, + source: TransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, (ChainApi::Error, Option>)> { let tx_hash = self.api.hash_and_length(&xt).0; let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { - return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) + return Err((PoolError::AlreadyImported(Box::new(tx_hash)).into(), None)) }; let submit_and_watch_futures = { let active_views = self.active_views.read(); @@ -200,7 +166,8 @@ where .map(|(_, view)| { let view = view.clone(); let xt = xt.clone(); - let source = source.clone(); + self.dropped_stream_controller + .add_initial_views(std::iter::once(tx_hash), view.at.hash); async move { match view.submit_and_watch(source, xt).await { Ok(watcher) => { @@ -224,7 +191,7 @@ where if let Some(Err(err)) = maybe_error { log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return 
Err(err); + return Err((err, Some(external_watcher))); }; Ok(external_watcher) @@ -303,18 +270,10 @@ where ) -> Vec, ExtrinsicFor>> { self.most_recent_view .read() - .map(|at| self.futures_at(at)) + .map(|at| self.get_view_at(at, true)) .flatten() - .unwrap_or_default() - } - - /// Returns a list of future transactions in the view at given block hash. - pub(super) fn futures_at( - &self, - at: Block::Hash, - ) -> Option, ExtrinsicFor>>> { - self.get_view_at(at, true) .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + .unwrap_or_default() } /// Collects all the transactions included in the blocks on the provided `tree_route` and @@ -379,16 +338,12 @@ where /// - moved to the inactive views set (`inactive_views`), /// - removed from the multi view listeners. /// - /// The `most_recent_view` is updated with the reference to the newly inserted view. - /// - /// If there are any pending tx replacments, they are applied to the new view. + /// The `most_recent_view` is update with the reference to the newly inserted view. pub(super) async fn insert_new_view( &self, view: Arc>, tree_route: &TreeRoute, ) { - self.apply_pending_tx_replacements(view.clone()).await; - //note: most_recent_view must be synced with changes in in/active_views. 
{ let mut most_recent_view_lock = self.most_recent_view.write(); @@ -440,10 +395,8 @@ where let mut removed_views = vec![]; { - let active_views = self.active_views.read(); - let inactive_views = self.inactive_views.read(); - - active_views + self.active_views + .read() .iter() .filter(|(hash, v)| !match finalized_number { Err(_) | Ok(None) => **hash == finalized_hash, @@ -452,8 +405,11 @@ where }) .map(|(_, v)| removed_views.push(v.at.hash)) .for_each(drop); + } - inactive_views + { + self.inactive_views + .read() .iter() .filter(|(_, v)| !match finalized_number { Err(_) | Ok(None) => false, @@ -491,48 +447,30 @@ where let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); - let mut dropped_views = vec![]; //clean up older then finalized { let mut active_views = self.active_views.write(); - let mut inactive_views = self.inactive_views.write(); - active_views.retain(|hash, v| { - let retain = match finalized_number { - Err(_) | Ok(None) => *hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, - }; - if !retain { - dropped_views.push(*hash); - } - retain + active_views.retain(|hash, v| match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, }); + } - inactive_views.retain(|hash, v| { - let retain = match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, - }; - if !retain { - dropped_views.push(*hash); - } - retain + { + let mut inactive_views = self.inactive_views.write(); + inactive_views.retain(|_, v| match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, }); log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } - log::trace!(target:LOG_TARGET,"handle_finalized: 
dropped_views: {:?}", dropped_views); - + self.listener.remove_view(finalized_hash); self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); - self.listener.remove_view(finalized_hash); - for view in dropped_views { - self.listener.remove_view(view); - self.dropped_stream_controller.remove_view(view); - } - finalized_xts } @@ -555,139 +493,4 @@ where futures::future::join_all(finish_revalidation_futures).await; log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); } - - /// Replaces an existing transaction in the view_store with a new one. - /// - /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. - /// - /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement - /// record will be inserted into a pending replacement map. Once the submission to all the views - /// is accomplished, the record is marked as *processed*. - /// - /// This map is later applied in `insert_new_view` method executed from different thread. - /// - /// If the transaction is already being replaced, it will simply return without making - /// changes. 
- pub(super) async fn replace_transaction( - &self, - source: TimedTransactionSource, - xt: ExtrinsicFor, - replaced: ExtrinsicHash, - watched: bool, - ) { - if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { - entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); - } else { - return - }; - - let xt_hash = self.api.hash_and_length(&xt).0; - log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction wtih {xt_hash:?}, w:{watched}"); - - self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; - - if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { - replacement.processed = true; - } - } - - /// Applies pending transaction replacements to the specified view. - /// - /// After application, all already processed replacements are removed. - async fn apply_pending_tx_replacements(&self, view: Arc>) { - let mut futures = vec![]; - for replacement in self.pending_txs_replacements.read().values() { - let xt_hash = self.api.hash_and_length(&replacement.xt).0; - futures.push(self.replace_transaction_in_view( - view.clone(), - replacement.source.clone(), - replacement.xt.clone(), - xt_hash, - replacement.watched, - )); - } - let _results = futures::future::join_all(futures).await; - self.pending_txs_replacements.write().retain(|_, r| r.processed); - } - - /// Submits `xt` to the given view. - /// - /// For watched transaction stream is added to the listener. 
- async fn replace_transaction_in_view( - &self, - view: Arc>, - source: TimedTransactionSource, - xt: ExtrinsicFor, - xt_hash: ExtrinsicHash, - watched: bool, - ) { - if watched { - match view.submit_and_watch(source, xt).await { - Ok(watcher) => { - self.listener.add_view_watcher_for_tx( - xt_hash, - view.at.hash, - watcher.into_stream().boxed(), - ); - }, - Err(e) => { - log::trace!( - target:LOG_TARGET, - "[{:?}] replace_transaction: submit_and_watch to {} failed {}", - xt_hash, view.at.hash, e - ); - }, - } - } else { - if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { - log::trace!( - target:LOG_TARGET, - "[{:?}] replace_transaction: submit to {} failed {}", - xt_hash, view.at.hash, e - ); - } - } - } - - /// Sends `xt` to every view (both active and inactive) containing `replaced` extrinsics. - /// - /// It is assumed that transaction is already known by the pool. Intended to ba called when `xt` - /// is replacing `replaced` extrinsic. - async fn replace_transaction_in_views( - &self, - source: TimedTransactionSource, - xt: ExtrinsicFor, - xt_hash: ExtrinsicHash, - replaced: ExtrinsicHash, - watched: bool, - ) { - if watched && !self.listener.contains_tx(&xt_hash) { - log::trace!( - target:LOG_TARGET, - "error: replace_transaction_in_views: no listener for watched transaction {:?}", - xt_hash, - ); - return; - } - - let submit_futures = { - let active_views = self.active_views.read(); - let inactive_views = self.inactive_views.read(); - active_views - .iter() - .chain(inactive_views.iter()) - .filter(|(_, view)| view.is_imported(&replaced)) - .map(|(_, view)| { - self.replace_transaction_in_view( - view.clone(), - source.clone(), - xt.clone(), - xt_hash, - watched, - ) - }) - .collect::>() - }; - let _results = futures::future::join_all(submit_futures).await; - } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 04eaa998f42e..e4c3a6c425a9 
100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,7 +20,7 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; use crate::LOG_TARGET; use log::{trace, warn}; @@ -30,8 +30,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, - TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, + TransactionSource as Source, TransactionTag as Tag, }, }; @@ -83,44 +83,6 @@ pub struct PruneStatus { pub pruned: Vec>>, } -/// A transaction source that includes a timestamp indicating when the transaction was submitted. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TimedTransactionSource { - /// The original source of the transaction. - pub source: TransactionSource, - - /// The time at which the transaction was submitted. - pub timestamp: Option, -} - -impl From for TransactionSource { - fn from(value: TimedTransactionSource) -> Self { - value.source - } -} - -impl TimedTransactionSource { - /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional - /// timestamp. - pub fn new_in_block(with_timestamp: bool) -> Self { - Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } - } - /// Creates a new instance with an internal `TransactionSource::External` source and an optional - /// timestamp. - pub fn new_external(with_timestamp: bool) -> Self { - Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } - } - /// Creates a new instance with an internal `TransactionSource::Local` source and an optional - /// timestamp. 
- pub fn new_local(with_timestamp: bool) -> Self { - Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } - } - /// Creates a new instance with an given source and an optional timestamp. - pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { - Self { source, timestamp: with_timestamp.then(Instant::now) } - } -} - /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction { @@ -140,8 +102,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. pub propagate: bool, - /// Timed source of that transaction. - pub source: TimedTransactionSource, + /// Source of that transaction. + pub source: Source, } impl AsRef for Transaction { @@ -195,7 +157,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source.clone(), + source: self.source, valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -360,36 +322,22 @@ impl BasePool { if !first { - promoted.push(current_hash.clone()); + promoted.push(current_hash); } - // If there were conflicting future transactions promoted, removed them from - // promoted set. - promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, - Err(e @ error::Error::TooLowPriority { .. }) => - if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); - return Err(e) - } else { - trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); - removed.push(current_tx); - promoted.retain(|hash| *hash != current_hash); - }, // transaction failed to be imported. 
Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); return Err(e) } else { - trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); - failed.push(current_tx.hash.clone()); + failed.push(current_hash); }, } first = false; @@ -486,24 +434,8 @@ impl BasePool Some(current.clone()), - Some(worst) => Some( - match (worst.transaction.source.timestamp, current.transaction.source.timestamp) - { - (Some(worst_timestamp), Some(current_timestamp)) => { - if worst_timestamp > current_timestamp { - current.clone() - } else { - worst - } - }, - _ => - if worst.imported_at > current.imported_at { - current.clone() - } else { - worst - }, - }, - ), + Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), + other => other, }); if let Some(worst) = worst { @@ -630,7 +562,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TimedTransactionSource::new_external(false), + source: Source::External, } } @@ -828,58 +760,6 @@ mod tests { ); } - #[test] - fn should_remove_conflicting_future() { - let mut pool = pool(); - pool.import(Transaction { - data: vec![3u8].into(), - hash: 3, - requires: vec![vec![1]], - priority: 50u64, - provides: vec![vec![3]], - ..default_tx().clone() - }) - .unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - let tx2 = Transaction { - data: vec![2u8].into(), - hash: 2, - requires: vec![vec![1]], - provides: vec![vec![3]], - ..default_tx().clone() - }; - pool.import(tx2.clone()).unwrap(); - assert_eq!(pool.future.len(), 2); - - let res = pool - .import(Transaction { - data: vec![1u8].into(), - hash: 1, - provides: vec![vec![1]], - ..default_tx().clone() - }) - .unwrap(); - - assert_eq!( - res, - Imported::Ready { - hash: 1, - promoted: vec![3], - failed: vec![], - removed: vec![tx2.into()] - } - ); - - let mut 
it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), None); - - assert_eq!(pool.future.len(), 0); - } - #[test] fn should_handle_a_cycle() { // given @@ -903,14 +783,14 @@ mod tests { assert_eq!(pool.ready.len(), 0); // when - let tx2 = Transaction { + pool.import(Transaction { data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], ..default_tx().clone() - }; - pool.import(tx2.clone()).unwrap(); + }) + .unwrap(); // then { @@ -937,12 +817,7 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { - hash: 4, - promoted: vec![1, 3], - failed: vec![], - removed: vec![tx2.into()] - } + Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } ); assert_eq!(pool.future.len(), 0); } @@ -1149,7 +1024,7 @@ mod tests { ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" +source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 41daf5491f70..a5593920eec4 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -36,7 +36,6 @@ pub type DroppedByLimitsStream = TracingUnboundedReceiver { - /// Map containing per-transaction sinks for emitting transaction status events. 
watchers: HashMap>>, finality_watchers: LinkedHashMap, Vec>, @@ -120,44 +119,32 @@ impl Listener, limits_enforced: bool) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| watcher.usurped(by.clone())); - - if let Some(ref sink) = self.dropped_by_limits_sink { - if let Err(e) = - sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) - { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + self.fire(tx, |watcher| match by { + Some(t) => watcher.usurped(t.clone()), + None => watcher.dropped(), + }); + + //note: LimitEnforced could be introduced as new status to get rid of this flag. + if limits_enforced { + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); + } } } } - /// Transaction was dropped from the pool because of the failure during the resubmission of - /// revalidate transactions or failure during pruning tags. - pub fn dropped(&mut self, tx: &H) { - trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); - self.fire(tx, |watcher| watcher.dropped()); - } - /// Transaction was removed as invalid. 
pub fn invalid(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index d93898b1b22a..c1225d7356d9 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -31,7 +31,7 @@ mod listener; mod pool; mod ready; mod rotator; -pub(crate) mod tracked_map; +mod tracked_map; mod validated_pool; pub mod base_pool; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index ff9cc1541af4..2dd8de352c6b 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -181,8 +181,10 @@ impl Pool { pub async fn submit_at( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + source: TransactionSource, + xts: impl IntoIterator>, ) -> Vec, B::Error>> { + let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -193,8 +195,10 @@ impl Pool { pub async fn resubmit_at( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + source: TransactionSource, + xts: impl IntoIterator>, ) -> Vec, B::Error>> { + let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -203,10 +207,10 @@ impl Pool { pub async fn submit_one( &self, at: &HashAndNumber, - source: base::TimedTransactionSource, + source: TransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); + let res = self.submit_at(at, source, std::iter::once(xt)).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } 
@@ -214,7 +218,7 @@ impl Pool { pub async fn submit_and_watch( &self, at: &HashAndNumber, - source: base::TimedTransactionSource, + source: TransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let (_, tx) = self @@ -364,7 +368,7 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; @@ -392,7 +396,7 @@ impl Pool { async fn verify( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; @@ -413,7 +417,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: base::TimedTransactionSource, + source: TransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -427,7 +431,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source.clone().into(), xt.clone()) + .validate_transaction(block_hash, source, xt.clone()) .await; let status = match validation_result { @@ -484,7 +488,6 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; - use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -492,10 +495,9 @@ mod tests { use sp_runtime::transaction_validity::TransactionSource; use std::{collections::HashMap, time::Instant}; use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; - use 
substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; + use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; - const SOURCE: TimedTransactionSource = - TimedTransactionSource { source: TransactionSource::External, timestamp: None }; + const SOURCE: TransactionSource = TransactionSource::External; #[test] fn should_validate_and_import_transaction() { @@ -543,8 +545,8 @@ mod tests { let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); // when - let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + let txs = txs.into_iter().map(|x| Arc::from(x)).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), SOURCE, txs)); log::debug!("--> {hashes:#?}"); // then diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 9061d0e25581..860bcff0bace 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -589,6 +589,7 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; + use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -600,7 +601,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: crate::TimedTransactionSource::new_external(false), + source: Source::External, } } @@ -710,7 +711,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: crate::TimedTransactionSource::new_external(false), + source: Source::External, }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 9a2e269b5eed..61a26fb4138c 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ 
b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,6 +106,7 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; + use sp_runtime::transaction_validity::TransactionSource; type Hash = u64; type Ex = (); @@ -125,7 +126,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: crate::TimedTransactionSource::new_external(false), + source: TransactionSource::External, }; (hash, tx) @@ -191,7 +192,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: crate::TimedTransactionSource::new_external(false), + source: TransactionSource::External, } } diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 6c3bbbf34b55..9e92dffc9f96 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -18,7 +18,7 @@ use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ - collections::{hash_map::Iter, HashMap}, + collections::HashMap, sync::{ atomic::{AtomicIsize, Ordering as AtomicOrdering}, Arc, @@ -101,30 +101,20 @@ impl<'a, K, V> TrackedMapReadAccess<'a, K, V> where K: Eq + std::hash::Hash, { - /// Returns true if the map contains given key. + /// Returns true if map contains key. pub fn contains_key(&self, key: &K) -> bool { self.inner_guard.contains_key(key) } - /// Returns the reference to the contained value by key, if exists. + /// Returns reference to the contained value by key, if exists. pub fn get(&self, key: &K) -> Option<&V> { self.inner_guard.get(key) } - /// Returns an iterator over all values. + /// Returns iterator over all values. pub fn values(&self) -> std::collections::hash_map::Values { self.inner_guard.values() } - - /// Returns the number of elements in the map. - pub fn len(&self) -> usize { - self.inner_guard.len() - } - - /// Returns an iterator over all key-value pairs. 
- pub fn iter(&self) -> Iter<'_, K, V> { - self.inner_guard.iter() - } } pub struct TrackedMapWriteAccess<'a, K, V> { @@ -159,20 +149,10 @@ where val } - /// Returns `true` if the inner map contains a value for the specified key. - pub fn contains_key(&self, key: &K) -> bool { - self.inner_guard.contains_key(key) - } - /// Returns mutable reference to the contained value by key, if exists. pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { self.inner_guard.get_mut(key) } - - /// Returns the number of elements in the map. - pub fn len(&mut self) -> usize { - self.inner_guard.len() - } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 14df63d9673e..d7f55198a40a 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -30,7 +30,7 @@ use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::{self, SaturatedConversion}, - transaction_validity::{TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: base::TimedTransactionSource, + source: TransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -280,7 +280,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.limit_enforced(h); + listener.dropped(h, None, true); } removed @@ -453,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash), + Status::Dropped => listener.dropped(&hash, None, false), Status::Failed => listener.invalid(&hash), } } @@ -492,7 +492,7 @@ impl ValidatedPool { fire_events(&mut 
*listener, promoted); } for f in &status.failed { - listener.dropped(f); + listener.dropped(f, None, false); } } @@ -671,21 +671,6 @@ impl ValidatedPool { ) -> super::listener::DroppedByLimitsStream, BlockHash> { self.listener.write().create_dropped_by_limits_stream() } - - /// Resends ready and future events for all the ready and future transactions that are already - /// in the pool. - /// - /// Intended to be called after cloning the instance of `ValidatedPool`. - pub fn retrigger_notifications(&self) { - let pool = self.pool.read(); - let mut listener = self.listener.write(); - pool.ready().for_each(|r| { - listener.ready(&r.hash, None); - }); - pool.futures().for_each(|f| { - listener.future(&f.hash); - }); - } } fn fire_events(listener: &mut Listener, imported: &base::Imported) @@ -697,7 +682,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); + removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash), false)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index 2fd31e772fd8..fb7cf99d4dc6 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,12 +113,6 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. - pub fn limit_enforced(&mut self) { - self.send(TransactionStatus::Dropped); - self.is_finalized = true; - } - - /// Transaction has been dropped from the pool. 
pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 366d91a973d2..888d25d3a0d2 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -30,16 +30,13 @@ mod single_state_txpool; mod transaction_pool_wrapper; use common::{api, enactment_state}; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; pub use api::FullChainApi; pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; -pub use graph::{ - base_pool::{Limit as PoolLimit, TimedTransactionSource}, - ChainApi, Options, Pool, -}; +pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool}; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; pub use transaction_pool_wrapper::TransactionPoolWrapper; @@ -53,6 +50,8 @@ type BoxedReadyIterator = Box< type ReadyIteratorFor = BoxedReadyIterator, graph::ExtrinsicFor>; +type PolledIterator = Pin> + Send>>; + /// Log target for transaction pool. /// /// It can be used by other components for logging functionality strictly related to txpool (e.g. 
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index f22fa2ddabde..5ef726c9f7d3 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) + api.validate_transaction(at, ext.source, ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -121,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source.clone(), + ext.source, ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -375,11 +375,11 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, graph::Pool, - TimedTransactionSource, }; use futures::executor::block_on; + use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; + use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; #[test] fn revalidation_queue_works() { @@ -398,7 +398,7 @@ mod tests { let uxt_hash = block_on(pool.submit_one( &han_of_block0, - TimedTransactionSource::new_external(false), + TransactionSource::External, uxt.clone().into(), )) .expect("Should be valid"); @@ -433,15 +433,14 @@ mod tests { let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); - let source = TimedTransactionSource::new_external(false); - let uxt_hashes = - block_on(pool.submit_at( - &han_of_block0, - vec![(source.clone(), uxt0.into()), (source, uxt1.into())], - )) - .into_iter() - .map(|r| 
r.expect("Should be valid")) - .collect::>(); + let uxt_hashes = block_on(pool.submit_at( + &han_of_block0, + TransactionSource::External, + vec![uxt0.into(), uxt1.into()], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index e7504012ca67..0826b95cf070 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -29,8 +29,9 @@ use crate::{ error, log_xt::log_xt_trace, }, - graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, - ReadyIteratorFor, LOG_TARGET, + graph, + graph::{ExtrinsicHash, IsValidator}, + PolledIterator, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; use futures::{channel::oneshot, future, prelude::*, Future, FutureExt}; @@ -38,8 +39,8 @@ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ error::Error as TxPoolError, ChainEvent, ImportNotificationStream, MaintainedTransactionPool, - PoolStatus, TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, - TxHash, + PoolFuture, PoolStatus, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -223,19 +224,26 @@ where &self.api } - async fn ready_at_with_timeout_internal( + fn ready_at_with_timeout_internal( &self, at: Block::Hash, timeout: std::time::Duration, - ) -> ReadyIteratorFor { - select! 
{ - ready = self.ready_at(at)=> ready, - _ = futures_timer::Delay::new(timeout)=> self.ready() - } + ) -> PolledIterator { + let timeout = futures_timer::Delay::new(timeout); + let ready_maintained = self.ready_at(at); + let ready_current = self.ready(); + + let ready = async { + select! { + ready = ready_maintained => ready, + _ = timeout => ready_current + } + }; + + Box::pin(ready) } } -#[async_trait] impl TransactionPool for BasicPool where Block: BlockT, @@ -247,51 +255,51 @@ where graph::base_pool::Transaction, graph::ExtrinsicFor>; type Error = PoolApi::Error; - async fn submit_at( + fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> Result, Self::Error>>, Self::Error> { + ) -> PoolFuture, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let xts = xts - .into_iter() - .map(|xt| { - (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) - }) - .collect::>(); + let xts = xts.into_iter().map(Arc::from).collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); let number = self.api.resolve_block_number(at); - let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) + async move { + let at = HashAndNumber { hash: at, number: number? }; + Ok(pool.submit_at(&at, source, xts).await) + } + .boxed() } - async fn submit_one( + fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result, Self::Error> { + ) -> PoolFuture, Self::Error> { let pool = self.pool.clone(); let xt = Arc::from(xt); self.metrics.report(|metrics| metrics.submitted_transactions.inc()); let number = self.api.resolve_block_number(at); - let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) - .await + async move { + let at = HashAndNumber { hash: at, number: number? 
}; + pool.submit_one(&at, source, xt).await + } + .boxed() } - async fn submit_and_watch( + fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result>>, Self::Error> { + ) -> PoolFuture>>, Self::Error> { let pool = self.pool.clone(); let xt = Arc::from(xt); @@ -299,16 +307,13 @@ where let number = self.api.resolve_block_number(at); - let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; + async move { + let at = HashAndNumber { hash: at, number: number? }; + let watcher = pool.submit_and_watch(&at, source, xt).await?; - Ok(watcher.into_stream().boxed()) + Ok(watcher.into_stream().boxed()) + } + .boxed() } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -338,9 +343,9 @@ where self.pool.validated_pool().ready_by_hash(hash) } - async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { + fn ready_at(&self, at: ::Hash) -> PolledIterator { let Ok(at) = self.api.resolve_block_number(at) else { - return Box::new(std::iter::empty()) as Box<_> + return async { Box::new(std::iter::empty()) as Box<_> }.boxed() }; let status = self.status(); @@ -349,23 +354,25 @@ where // There could be transaction being added because of some re-org happening at the relevant // block, but this is relative unlikely. 
if status.ready == 0 && status.future == 0 { - return Box::new(std::iter::empty()) as Box<_> + return async { Box::new(std::iter::empty()) as Box<_> }.boxed() } if self.ready_poll.lock().updated_at() >= at { log::trace!(target: LOG_TARGET, "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return iterator + return async move { iterator }.boxed() } - let result = self.ready_poll.lock().add(at).map(|received| { - received.unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Error receiving pending set: {:?}", e); - Box::new(std::iter::empty()) + self.ready_poll + .lock() + .add(at) + .map(|received| { + received.unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Error receiving pending set: {:?}", e); + Box::new(std::iter::empty()) + }) }) - }); - - result.await + .boxed() } fn ready(&self) -> ReadyIteratorFor { @@ -377,12 +384,12 @@ where pool.futures().cloned().collect::>() } - async fn ready_at_with_timeout( + fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> ReadyIteratorFor { - self.ready_at_with_timeout_internal(at, timeout).await + ) -> PolledIterator { + self.ready_at_with_timeout_internal(at, timeout) } } @@ -470,7 +477,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TimedTransactionSource::new_local(false), + TransactionSource::Local, Arc::from(xt), bytes, validity, @@ -674,8 +681,8 @@ where resubmit_transactions.extend( //todo: arctx - we need to get ref from somewhere - block_transactions.into_iter().map(Arc::from).filter_map(|tx| { - let tx_hash = pool.hash_of(&tx); + block_transactions.into_iter().map(Arc::from).filter(|tx| { + let tx_hash = pool.hash_of(tx); let contains = pruned_log.contains(&tx_hash); // need to count all transactions, not just filtered, here @@ -688,15 +695,8 @@ where tx_hash, hash, ); - Some(( - // These transactions are coming from retracted blocks, we should - 
// simply consider them external. - TimedTransactionSource::new_external(false), - tx, - )) - } else { - None } + !contains }), ); @@ -705,7 +705,14 @@ where }); } - pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + pool.resubmit_at( + &hash_and_number, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await; } let extra_pool = pool.clone(); diff --git a/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs index e373c0278d80..4e1b53833b8f 100644 --- a/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs +++ b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs @@ -22,16 +22,16 @@ use crate::{ builder::FullClientTransactionPool, graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash}, - ChainApi, FullChainApi, ReadyIteratorFor, + ChainApi, FullChainApi, }; use async_trait::async_trait; use sc_transaction_pool_api::{ ChainEvent, ImportNotificationStream, LocalTransactionFor, LocalTransactionPool, - MaintainedTransactionPool, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, - TransactionSource, TransactionStatusStreamFor, TxHash, + MaintainedTransactionPool, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_runtime::traits::Block as BlockT; -use std::{collections::HashMap, pin::Pin, sync::Arc}; +use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc}; /// The wrapper for actual object providing implementation of TransactionPool. 
/// @@ -49,7 +49,6 @@ where + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue; -#[async_trait] impl TransactionPool for TransactionPoolWrapper where Block: BlockT, @@ -69,38 +68,44 @@ where >; type Error = as ChainApi>::Error; - async fn submit_at( + fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> Result, Self::Error>>, Self::Error> { - self.0.submit_at(at, source, xts).await + ) -> PoolFuture, Self::Error>>, Self::Error> { + self.0.submit_at(at, source, xts) } - async fn submit_one( + fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result, Self::Error> { - self.0.submit_one(at, source, xt).await + ) -> PoolFuture, Self::Error> { + self.0.submit_one(at, source, xt) } - async fn submit_and_watch( + fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> Result>>, Self::Error> { - self.0.submit_and_watch(at, source, xt).await + ) -> PoolFuture>>, Self::Error> { + self.0.submit_and_watch(at, source, xt) } - async fn ready_at( + fn ready_at( &self, at: ::Hash, - ) -> ReadyIteratorFor> { - self.0.ready_at(at).await + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + > { + self.0.ready_at(at) } fn ready(&self) -> Box> + Send> { @@ -135,12 +140,19 @@ where self.0.ready_transaction(hash) } - async fn ready_at_with_timeout( + fn ready_at_with_timeout( &self, at: ::Hash, timeout: std::time::Duration, - ) -> ReadyIteratorFor> { - self.0.ready_at_with_timeout(at, timeout).await + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send + + '_, + >, + > { + self.0.ready_at_with_timeout(at, timeout) } } diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 8bf08122995c..9f343a9bd029 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -30,7 +30,7 @@ use sc_transaction_pool_api::{ }; 
use sp_runtime::transaction_validity::InvalidTransaction; use std::{sync::Arc, time::Duration}; -use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; pub mod fatp_common; @@ -2267,13 +2267,19 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - let header07 = api.push_block(7, vec![], true); - let event = finalized_block_event(&pool, header03.hash(), header07.hash()); - block_on(pool.maintain(event)); + // Import enough blocks to make xt4i revalidated + let mut prev_header = header03; + // wait 10 blocks for revalidation + for n in 7..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Invalid]); assert_eq!(pool.mempool_len(), (0, 0)); } diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index aaffebc0db0a..63af729b8b73 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -24,7 +24,7 @@ use sp_runtime::transaction_validity::TransactionSource; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, - Sr25519Keyring::*, + AccountKeyring::*, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; pub const LOG_TARGET: &str = "txpool"; @@ -186,9 +186,9 @@ macro_rules! assert_pool_status { #[macro_export] macro_rules! 
assert_ready_iterator { - ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + ($hash:expr, $pool:expr, [$( $xt:expr ),+]) => {{ let ready_iterator = $pool.ready_at($hash).now_or_never().unwrap(); - let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),+]; let output: Vec<_> = ready_iterator.collect(); log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); log::debug!(target:LOG_TARGET, "output: {:#?}", output); @@ -201,20 +201,6 @@ macro_rules! assert_ready_iterator { }}; } -#[macro_export] -macro_rules! assert_future_iterator { - ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ - let futures = $pool.futures_at($hash).unwrap(); - let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; - log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); - log::debug!(target:LOG_TARGET, "output: {:#?}", expected); - assert_eq!(expected.len(), futures.len()); - let hsf = futures.iter().map(|a| a.hash).collect::>(); - let hse = expected.into_iter().collect::>(); - assert_eq!(hse,hsf); - }}; -} - pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index fb02b21ebc2b..6fd5f93ed070 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -19,7 +19,6 @@ //! Tests of limits for fork-aware transaction pool. 
pub mod fatp_common; - use fatp_common::{ finalized_block_event, invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE, }; @@ -28,8 +27,7 @@ use sc_transaction_pool::ChainApi; use sc_transaction_pool_api::{ error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; -use std::thread::sleep; -use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; #[test] @@ -94,103 +92,25 @@ fn fatp_limits_ready_count_works() { //charlie was not included into view: assert_pool_status!(header01.hash(), &pool, 2, 0); assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); - //todo: can we do better? We don't have API to check if event was processed internally. - let mut counter = 0; - while pool.mempool_len().0 == 3 { - sleep(std::time::Duration::from_millis(1)); - counter = counter + 1; - if counter > 20 { - assert!(false, "timeout"); - } - } - assert_eq!(pool.mempool_len().0, 2); //branch with alice transactions: let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); block_on(pool.maintain(event)); - assert_eq!(pool.mempool_len().0, 2); - assert_pool_status!(header02b.hash(), &pool, 0, 0); - assert_ready_iterator!(header02b.hash(), pool, []); + assert_eq!(pool.mempool_len().0, 3); + //charlie was resubmitted from mempool into the view: + assert_pool_status!(header02b.hash(), &pool, 1, 0); + assert_ready_iterator!(header02b.hash(), pool, [xt0]); //branch with alice/charlie transactions shall also work: let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); - api.set_nonce(header02a.hash(), Alice.into(), 201); let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); block_on(pool.maintain(event)); - assert_eq!(pool.mempool_len().0, 2); - // assert_pool_status!(header02a.hash(), &pool,
1, 0); + assert_eq!(pool.mempool_len().0, 3); + assert_pool_status!(header02a.hash(), &pool, 1, 0); assert_ready_iterator!(header02a.hash(), pool, [xt2]); } -#[test] -fn fatp_limits_ready_count_works_for_submit_at() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 200); - api.set_nonce(api.genesis_hash(), Charlie.into(), 500); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Charlie, 500); - let xt1 = uxt(Alice, 200); - let xt2 = uxt(Alice, 201); - - let results = block_on(pool.submit_at( - header01.hash(), - SOURCE, - vec![xt0.clone(), xt1.clone(), xt2.clone()], - )) - .unwrap(); - - assert!(matches!(results[0].as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); - assert!(results[1].as_ref().is_ok()); - assert!(results[2].as_ref().is_ok()); - assert_eq!(pool.mempool_len().0, 2); - //charlie was not included into view: - assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); -} - -#[test] -fn fatp_limits_ready_count_works_for_submit_and_watch() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 300); - api.set_nonce(api.genesis_hash(), Charlie.into(), 500); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Charlie, 500); - let xt1 = uxt(Alice, 200); - let xt2 = uxt(Bob, 300); - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 2); - api.set_priority(&xt2, 1); - - let result0 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, 
xt0.clone())); - let result1 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())); - let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ()); - - assert!(matches!(result2.unwrap_err().0, TxPoolError::ImmediatelyDropped)); - assert!(result0.is_ok()); - assert!(result1.is_ok()); - assert_eq!(pool.mempool_len().1, 2); - //charlie was not included into view: - assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); -} - #[test] fn fatp_limits_future_count_works() { sp_tracing::try_init_simple(); @@ -211,33 +131,29 @@ fn fatp_limits_future_count_works() { let xt2 = uxt(Alice, 201); let xt3 = uxt(Alice, 202); - block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())).unwrap(); - block_on(pool.submit_one(header01.hash(), SOURCE, xt2.clone())).unwrap(); - block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())).unwrap(); + let submissions = vec![ + pool.submit_one(header01.hash(), SOURCE, xt1.clone()), + pool.submit_one(header01.hash(), SOURCE, xt2.clone()), + pool.submit_one(header01.hash(), SOURCE, xt3.clone()), + ]; + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(Result::is_ok)); //charlie was not included into view due to limits: assert_pool_status!(header01.hash(), &pool, 0, 2); - //todo: can we do better? We don't have API to check if event was processed internally. 
- let mut counter = 0; - while pool.mempool_len().0 != 2 { - sleep(std::time::Duration::from_millis(1)); - counter = counter + 1; - if counter > 20 { - assert!(false, "timeout"); - } - } let header02 = api.push_block(2, vec![xt0], true); api.set_nonce(header02.hash(), Alice.into(), 201); //redundant let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); block_on(pool.maintain(event)); - assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().0, 2); + //charlie was resubmitted from mempool into the view: + assert_pool_status!(header02.hash(), &pool, 2, 1); + assert_eq!(pool.mempool_len().0, 3); } #[test] -fn fatp_limits_watcher_mempool_doesnt_prevent_dropping() { +fn fatp_limits_watcher_mempool_prevents_dropping() { sp_tracing::try_init_simple(); let builder = TestPoolBuilder::new(); @@ -253,15 +169,23 @@ fn fatp_limits_watcher_mempool_prevents_dropping() { let xt1 = uxt(Bob, 300); let xt2 = uxt(Alice, 200); - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(1).collect::>(); + log::debug!("xt0_status: {:#?}",
xt0_status); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_eq!(xt0_status, vec![TransactionStatus::Ready]); let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); assert_eq!(xt1_status, vec![TransactionStatus::Ready]); @@ -290,23 +214,28 @@ fn fatp_limits_watcher_non_intial_view_drops_transaction() { let xt1 = uxt(Charlie, 400); let xt2 = uxt(Bob, 300); - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - - // make sure tx0 is actually dropped before checking iterator - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_ready_iterator!(header02.hash(), pool, [xt1, xt2]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt0]); - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(1).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready]); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); assert_eq!(xt2_status, vec![TransactionStatus::Ready]); @@ -330,19 +259,32 @@ fn fatp_limits_watcher_finalized_transaction_frees_ready_space() { let xt1 = uxt(Charlie, 400); let xt2 = uxt(Bob, 300); - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); assert_pool_status!(header02.hash(), &pool, 2, 0); assert_ready_iterator!(header02.hash(), pool, [xt1, xt2]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(3).collect::>(); + assert_eq!( + xt0_status, + vec![ + 
TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)) + ] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); assert_eq!(xt1_status, vec![TransactionStatus::Ready]); @@ -369,464 +311,43 @@ fn fatp_limits_watcher_view_can_drop_transcation() { let xt2 = uxt(Bob, 300); let xt3 = uxt(Alice, 200); - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped,]); + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); - let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); - - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let submission = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, 
xt3.clone())); + let xt3_watcher = submission.unwrap(); assert_pool_status!(header02.hash(), pool, 2, 0); assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); - assert_eq!(xt2_status, vec![TransactionStatus::Ready]); - - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); - assert_eq!(xt3_status, vec![TransactionStatus::Ready]); -} - -#[test] -fn fatp_limits_watcher_empty_and_full_view_immediately_drops() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 300); - api.set_nonce(api.genesis_hash(), Charlie.into(), 400); - api.set_nonce(api.genesis_hash(), Dave.into(), 500); - api.set_nonce(api.genesis_hash(), Eve.into(), 600); - api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); - - let header01 = api.push_block(1, vec![], true); - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Bob, 300); - let xt2 = uxt(Charlie, 400); - - let xt3 = uxt(Dave, 500); - let xt4 = uxt(Eve, 600); - let xt5 = uxt(Ferdie, 700); - - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - - assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); - - let header02e = api.push_block_with_parent( - header01.hash(), - vec![xt0.clone(), xt1.clone(), xt2.clone()], - 
true, + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(3).collect::>(); + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)) + ] ); - api.set_nonce(header02e.hash(), Alice.into(), 201); - api.set_nonce(header02e.hash(), Bob.into(), 301); - api.set_nonce(header02e.hash(), Charlie.into(), 401); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02e.hash()))); - - assert_pool_status!(header02e.hash(), &pool, 0, 0); - - let header02f = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02f.hash()))); - assert_pool_status!(header02f.hash(), &pool, 2, 0); - assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); - - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); - let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); - let result5 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).map(|_| ()); - - //xt5 hits internal mempool limit - assert!(matches!(result5.unwrap_err().0, TxPoolError::ImmediatelyDropped)); - - assert_pool_status!(header02e.hash(), &pool, 2, 0); - assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]); - assert_eq!(pool.mempool_len().1, 4); let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); - assert_eq!( - xt1_status, - vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] - ); + assert_eq!(xt1_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!( - xt2_status, - vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] - ); + let xt2_status = 
futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); assert_eq!(xt3_status, vec![TransactionStatus::Ready]); - let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); - assert_eq!(xt4_status, vec![TransactionStatus::Ready]); -} - -#[test] -fn fatp_limits_watcher_empty_and_full_view_drops_with_event() { - // it is almost copy of fatp_limits_watcher_empty_and_full_view_immediately_drops, but the - // mempool_count limit is set to 5 (vs 4). - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(5).with_ready_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 300); - api.set_nonce(api.genesis_hash(), Charlie.into(), 400); - api.set_nonce(api.genesis_hash(), Dave.into(), 500); - api.set_nonce(api.genesis_hash(), Eve.into(), 600); - api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); - - let header01 = api.push_block(1, vec![], true); - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Bob, 300); - let xt2 = uxt(Charlie, 400); - - let xt3 = uxt(Dave, 500); - let xt4 = uxt(Eve, 600); - let xt5 = uxt(Ferdie, 700); - - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - - assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); - - let header02e = 
api.push_block_with_parent( - header01.hash(), - vec![xt0.clone(), xt1.clone(), xt2.clone()], - true, - ); - api.set_nonce(header02e.hash(), Alice.into(), 201); - api.set_nonce(header02e.hash(), Bob.into(), 301); - api.set_nonce(header02e.hash(), Charlie.into(), 401); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02e.hash()))); - - assert_pool_status!(header02e.hash(), &pool, 0, 0); - - let header02f = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02f.hash()))); - assert_pool_status!(header02f.hash(), &pool, 2, 0); - assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); - - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); - let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); - let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); - - assert_pool_status!(header02e.hash(), &pool, 2, 0); - assert_ready_iterator!(header02e.hash(), pool, [xt4, xt5]); - - let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); - assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); - - //xt5 got dropped - assert_eq!(pool.mempool_len().1, 4); - - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); - assert_eq!( - xt1_status, - vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] - ); - - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); - assert_eq!( - xt2_status, - vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] - ); - - let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); - assert_eq!(xt4_status, vec![TransactionStatus::Ready]); - - let xt5_status = 
futures::executor::block_on_stream(xt5_watcher).take(1).collect::>(); - assert_eq!(xt5_status, vec![TransactionStatus::Ready]); -} - -fn large_uxt(x: usize) -> substrate_test_runtime::Extrinsic { - substrate_test_runtime::ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() -} - -#[test] -fn fatp_limits_ready_size_works() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_ready_bytes_size(3390).with_future_bytes_size(0).build(); - - let header01 = api.push_block(1, vec![], true); - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = large_uxt(0); - let xt1 = large_uxt(1); - let xt2 = large_uxt(2); - - let submissions = vec![ - pool.submit_one(header01.hash(), SOURCE, xt0.clone()), - pool.submit_one(header01.hash(), SOURCE, xt1.clone()), - pool.submit_one(header01.hash(), SOURCE, xt2.clone()), - ]; - - let results = block_on(futures::future::join_all(submissions)); - assert!(results.iter().all(Result::is_ok)); - //charlie was not included into view: - assert_pool_status!(header01.hash(), &pool, 3, 0); - assert_ready_iterator!(header01.hash(), pool, [xt0, xt1, xt2]); - - let xt3 = large_uxt(3); - let result3 = block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())); - assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); -} - -#[test] -fn fatp_limits_future_size_works() { - sp_tracing::try_init_simple(); - const UXT_SIZE: usize = 137; - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder - .with_ready_bytes_size(UXT_SIZE) - .with_future_bytes_size(3 * UXT_SIZE) - .build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 200); - api.set_nonce(api.genesis_hash(), Charlie.into(), 500); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Bob, 201); - let xt1 = 
uxt(Charlie, 501); - let xt2 = uxt(Alice, 201); - let xt3 = uxt(Alice, 202); - assert_eq!(api.hash_and_length(&xt0).1, UXT_SIZE); - assert_eq!(api.hash_and_length(&xt1).1, UXT_SIZE); - assert_eq!(api.hash_and_length(&xt2).1, UXT_SIZE); - assert_eq!(api.hash_and_length(&xt3).1, UXT_SIZE); - - let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())).unwrap(); - let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())).unwrap(); - let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt2.clone())).unwrap(); - let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())).unwrap(); - - //todo: can we do better? We don't have API to check if event was processed internally. - let mut counter = 0; - while pool.mempool_len().0 == 4 { - sleep(std::time::Duration::from_millis(1)); - counter = counter + 1; - if counter > 20 { - assert!(false, "timeout"); - } - } - assert_pool_status!(header01.hash(), &pool, 0, 3); - assert_eq!(pool.mempool_len().0, 3); -} - -#[test] -fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 300); - api.set_nonce(api.genesis_hash(), Charlie.into(), 400); - api.set_nonce(api.genesis_hash(), Dave.into(), 500); - api.set_nonce(api.genesis_hash(), Eve.into(), 600); - api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); - - let header01 = api.push_block(1, vec![], true); - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Bob, 300); - let xt2 = uxt(Charlie, 400); - - let xt3 = uxt(Dave, 500); - let xt4 = uxt(Eve, 600); - let xt5 = uxt(Ferdie, 700); - - let _xt0_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - let _xt1_watcher = - 
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - - assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); - - let header02 = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - - let _xt2_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - let _xt3_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); - - assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 4); - - let header03 = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); - - let _xt4_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); - let _xt5_watcher = - block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); - - assert_pool_status!(header03.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 6); - - let header04 = - api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); - api.set_nonce(header04.hash(), Alice.into(), 201); - api.set_nonce(header04.hash(), Bob.into(), 301); - api.set_nonce(header04.hash(), Charlie.into(), 401); - api.set_nonce(header04.hash(), Dave.into(), 501); - api.set_nonce(header04.hash(), Eve.into(), 601); - api.set_nonce(header04.hash(), Ferdie.into(), 701); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); - - assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); - assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); - assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); - assert_ready_iterator!(header04.hash(), pool, []); - - block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); - 
assert!(!pool.status_all().contains_key(&header01.hash())); - - block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); - assert!(!pool.status_all().contains_key(&header02.hash())); - - //view 01 was dropped - assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); - assert_eq!(pool.mempool_len().1, 6); - - block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); - - //no revalidation has happened yet, all txs are kept - assert_eq!(pool.mempool_len().1, 6); - - //view 03 is still there - assert!(!pool.status_all().contains_key(&header03.hash())); - - //view 02 was dropped - assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); - - let mut prev_header = header03; - for n in 5..=11 { - let header = api.push_block(n, vec![], true); - let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); - block_on(pool.maintain(event)); - prev_header = header; - } - - //now revalidation has happened, all txs are dropped - assert_eq!(pool.mempool_len().1, 0); -} - -#[test] -fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build(); - api.set_nonce(api.genesis_hash(), Bob.into(), 300); - api.set_nonce(api.genesis_hash(), Charlie.into(), 400); - api.set_nonce(api.genesis_hash(), Dave.into(), 500); - api.set_nonce(api.genesis_hash(), Eve.into(), 600); - api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); - - let header01 = api.push_block(1, vec![], true); - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 201); - let xt1 = uxt(Bob, 301); - let xt2 = uxt(Charlie, 401); - - let xt3 = uxt(Dave, 501); - let xt4 = uxt(Eve, 601); - let xt5 = uxt(Ferdie, 701); - - let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), 
SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); - - assert_pool_status!(header01.hash(), &pool, 0, 2); - assert_eq!(pool.mempool_len().1, 2); - assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); - - let header02 = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - - let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); - let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); - - assert_pool_status!(header02.hash(), &pool, 0, 2); - assert_eq!(pool.mempool_len().1, 4); - assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); - - let header03 = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); - - let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); - let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); - - assert_pool_status!(header03.hash(), &pool, 0, 2); - assert_eq!(pool.mempool_len().1, 6); - assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); - - let header04 = api.push_block_with_parent(header03.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); - - assert_pool_status!(header04.hash(), &pool, 0, 2); - assert_eq!(pool.futures().len(), 2); - assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); - - block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); - assert_eq!(pool.active_views_count(), 1); - assert_eq!(pool.inactive_views_count(), 0); - //todo: can we do better? We don't have API to check if event was processed internally. 
- let mut counter = 0; - while pool.mempool_len().1 != 2 { - sleep(std::time::Duration::from_millis(1)); - counter = counter + 1; - if counter > 20 { - assert!(false, "timeout {}", pool.mempool_len().1); - } - } - assert_eq!(pool.mempool_len().1, 2); - assert_pool_status!(header04.hash(), &pool, 0, 2); - assert_eq!(pool.futures().len(), 2); - - let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; - for x in to_be_checked { - let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); - assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); - } - - let to_be_checked = vec![xt4_watcher, xt5_watcher]; - for x in to_be_checked { - let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); - assert_eq!(x_status, vec![TransactionStatus::Future]); - } } diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs deleted file mode 100644 index 4ed9b4503861..000000000000 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ /dev/null @@ -1,249 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Tests of priorities for fork-aware transaction pool. 
- -pub mod fatp_common; - -use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; -use futures::{executor::block_on, FutureExt}; -use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; -use substrate_test_runtime_client::Sr25519Keyring::*; -use substrate_test_runtime_transaction_pool::uxt; - -#[test] -fn fatp_prio_ready_higher_evicts_lower() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Alice, 200); - - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 3); - - let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); - let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); - - log::info!("r0 => {:?}", result0); - log::info!("r1 => {:?}", result1); - log::info!("len: {:?}", pool.mempool_len()); - log::info!("len: {:?}", pool.status_all()[&header01.hash()]); - assert_ready_iterator!(header01.hash(), pool, [xt1]); - assert_pool_status!(header01.hash(), &pool, 1, 0); -} - -#[test] -fn fatp_prio_watcher_ready_higher_evicts_lower() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Alice, 200); - - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 3); - - let xt0_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = - 
block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); - - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!( - xt0_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] - ); - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Ready]); - - log::info!("len: {:?}", pool.mempool_len()); - log::info!("len: {:?}", pool.status_all()[&header01.hash()]); - assert_ready_iterator!(header01.hash(), pool, [xt1]); - assert_pool_status!(header01.hash(), &pool, 1, 0); -} - -#[test] -fn fatp_prio_watcher_future_higher_evicts_lower() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); - - let header01 = api.push_block(1, vec![], true); - - let event = new_best_block_event(&pool, None, header01.hash()); - block_on(pool.maintain(event)); - - let xt0 = uxt(Alice, 201); - let xt1 = uxt(Alice, 201); - let xt2 = uxt(Alice, 200); - - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 3); - - let xt0_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); - let xt1_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); - let xt2_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); - - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - - assert_eq!( - xt0_status, - vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] - ); - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); - 
assert_eq!(xt2_status, vec![TransactionStatus::Ready]); - - assert_eq!(pool.mempool_len().1, 2); - assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); - assert_pool_status!(header01.hash(), &pool, 2, 0); -} - -#[test] -fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - - let header01 = api.push_block(1, vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); - - let xt0 = uxt(Alice, 200); - let xt1 = uxt(Alice, 200); - - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 3); - - let xt0_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); - - let header02 = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - - let header03a = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); - - let header03b = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); - - assert_pool_status!(header03a.hash(), &pool, 1, 0); - assert_ready_iterator!(header03a.hash(), pool, [xt0]); - assert_pool_status!(header03b.hash(), &pool, 1, 0); - assert_ready_iterator!(header03b.hash(), pool, [xt0]); - assert_ready_iterator!(header01.hash(), pool, [xt0]); - assert_ready_iterator!(header02.hash(), pool, [xt0]); - - let xt1_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); - - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Ready]); - let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!( - xt0_status, - vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] - ); - assert_ready_iterator!(header03a.hash(), pool, [xt1]); - assert_ready_iterator!(header03b.hash(), pool, [xt1]); - assert_ready_iterator!(header01.hash(), pool, [xt1]); - assert_ready_iterator!(header02.hash(), pool, [xt1]); -} - -#[test] -fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { - sp_tracing::try_init_simple(); - - let builder = TestPoolBuilder::new(); - let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); - - let header01 = api.push_block(1, vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); - - let xt0 = uxt(Alice, 201); - let xt1 = uxt(Alice, 201); - let xt2 = uxt(Alice, 200); - - api.set_priority(&xt0, 2); - api.set_priority(&xt1, 3); - - let xt0_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); - - let xt1_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); - - let header02 = api.push_block_with_parent(header01.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); - - let header03a = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); - - let header03b = api.push_block_with_parent(header02.hash(), vec![], true); - block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); - - assert_pool_status!(header03a.hash(), &pool, 0, 2); - assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); - assert_pool_status!(header03b.hash(), &pool, 0, 2); - assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); - assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); - 
assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); - - let xt2_watcher = - block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); - - let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); - assert_eq!(xt2_status, vec![TransactionStatus::Ready]); - let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); - assert_eq!(xt1_status, vec![TransactionStatus::Future]); - let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); - assert_eq!( - xt0_status, - vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] - ); - assert_future_iterator!(header03a.hash(), pool, []); - assert_future_iterator!(header03b.hash(), pool, []); - assert_future_iterator!(header01.hash(), pool, []); - assert_future_iterator!(header02.hash(), pool, []); - - assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); - assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); - assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); - assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); -} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 20997606c607..ed0fd7d4e655 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -40,8 +40,8 @@ use sp_runtime::{ use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData}, + AccountKeyring::*, ClientBlockImportExt, - Sr25519Keyring::*, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; @@ -80,14 +80,12 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } -const TSOURCE: TimedTransactionSource = - TimedTransactionSource { source: TransactionSource::External, timestamp: 
None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool @@ -101,9 +99,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -118,7 +116,7 @@ fn multiple_submission_should_work() { fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into())) .unwrap(); log::debug!("-> {:?}", pool.validated_pool().status()); @@ -134,7 +132,7 @@ fn early_nonce_should_be_culled() { fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -143,7 +141,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool 
.validated_pool() @@ -157,9 +155,9 @@ fn late_nonce_should_be_queued() { fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -185,9 +183,9 @@ fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -198,7 +196,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -226,7 +224,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone())) .expect("1. 
Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(api.validation_requests().len(), 1); @@ -244,7 +242,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt1 = uxt(Alice, 211); - block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) + block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into())) .expect("2. Imported"); assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); diff --git a/substrate/docs/Upgrading-2.0-to-3.0.md b/substrate/docs/Upgrading-2.0-to-3.0.md index f6fc5cf4b079..1be41a34ef34 100644 --- a/substrate/docs/Upgrading-2.0-to-3.0.md +++ b/substrate/docs/Upgrading-2.0-to-3.0.md @@ -1003,7 +1003,7 @@ modified your chain you should probably try to apply these patches: }; use sp_timestamp; - use sp_finality_tracker; - use sp_keyring::Sr25519Keyring; + use sp_keyring::AccountKeyring; use sc_service_test::TestNetNode; use crate::service::{new_full_base, new_light_base, NewFullBase}; - use sp_runtime::traits::IdentifyAccount; @@ -1034,7 +1034,7 @@ modified your chain you should probably try to apply these patches: + let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(Sr25519Keyring::Bob.pair()); + let bob = Arc::new(AccountKeyring::Bob.pair()); @@ -528,14 +539,13 @@ mod tests { Ok((node, (inherent_data_providers, setup_handles.unwrap()))) }, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 8fc0d8468430..2d0daf82997d 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -26,28 +26,28 @@ scale-info = { features = [ ], workspace = true } # primitive deps, used for developing FRAME pallets. 
-sp-arithmetic = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } # frame deps, for developing FRAME pallets. frame-support = { workspace = true } frame-system = { workspace = true } # primitive types used for developing FRAME runtimes. +sp-version = { optional = true, workspace = true } sp-api = { optional = true, workspace = true } sp-block-builder = { optional = true, workspace = true } +sp-transaction-pool = { optional = true, workspace = true } +sp-offchain = { optional = true, workspace = true } +sp-session = { optional = true, workspace = true } sp-consensus-aura = { optional = true, workspace = true } sp-consensus-grandpa = { optional = true, workspace = true } sp-genesis-builder = { optional = true, workspace = true } sp-inherents = { optional = true, workspace = true } -sp-keyring = { optional = true, workspace = true } -sp-offchain = { optional = true, workspace = true } -sp-session = { optional = true, workspace = true } sp-storage = { optional = true, workspace = true } -sp-transaction-pool = { optional = true, workspace = true } -sp-version = { optional = true, workspace = true } +sp-keyring = { optional = true, workspace = true } frame-executive = { optional = true, workspace = true } frame-system-rpc-runtime-api = { optional = true, workspace = true } diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index 9d21b9e964c9..451b86b35dde 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -31,14 +31,14 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-collective = { optional = true, workspace = true } pallet-identity = { workspace = true } +pallet-collective = { optional = true, workspace = true } [dev-dependencies] 
array-bytes = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } pallet-balances = { workspace = true, default-features = true } pallet-collective = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/alliance/src/weights.rs b/substrate/frame/alliance/src/weights.rs index dff60ec20cde..0184ac91107c 100644 --- a/substrate/frame/alliance/src/weights.rs +++ b/substrate/frame/alliance/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_alliance` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -91,16 +91,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `721 + m * (32 ±0) + p * (36 ±0)` + // Measured: `688 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 36_770_000 picoseconds. - Weight::from_parts(39_685_981, 6676) - // Standard Error: 156 - .saturating_add(Weight::from_parts(588, 0).saturating_mul(b.into())) - // Standard Error: 1_636 - .saturating_add(Weight::from_parts(31_314, 0).saturating_mul(m.into())) - // Standard Error: 1_616 - .saturating_add(Weight::from_parts(158_254, 0).saturating_mul(p.into())) + // Minimum execution time: 31_545_000 picoseconds. 
+ Weight::from_parts(33_432_774, 6676) + // Standard Error: 121 + .saturating_add(Weight::from_parts(232, 0).saturating_mul(b.into())) + // Standard Error: 1_263 + .saturating_add(Weight::from_parts(47_800, 0).saturating_mul(m.into())) + // Standard Error: 1_247 + .saturating_add(Weight::from_parts(188_655, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -113,12 +113,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1180 + m * (64 ±0)` + // Measured: `1147 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 36_851_000 picoseconds. - Weight::from_parts(38_427_277, 6676) - // Standard Error: 1_877 - .saturating_add(Weight::from_parts(50_131, 0).saturating_mul(m.into())) + // Minimum execution time: 30_462_000 picoseconds. + Weight::from_parts(31_639_466, 6676) + // Standard Error: 980 + .saturating_add(Weight::from_parts(60_075, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -137,14 +137,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `707 + m * (96 ±0) + p * (36 ±0)` + // Measured: `674 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 43_572_000 picoseconds. 
- Weight::from_parts(40_836_679, 6676) - // Standard Error: 1_764 - .saturating_add(Weight::from_parts(59_213, 0).saturating_mul(m.into())) - // Standard Error: 1_720 - .saturating_add(Weight::from_parts(171_689, 0).saturating_mul(p.into())) + // Minimum execution time: 40_765_000 picoseconds. + Weight::from_parts(37_690_472, 6676) + // Standard Error: 1_372 + .saturating_add(Weight::from_parts(69_441, 0).saturating_mul(m.into())) + // Standard Error: 1_338 + .saturating_add(Weight::from_parts(152_833, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -169,16 +169,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1287 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1254 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 62_758_000 picoseconds. - Weight::from_parts(63_400_227, 6676) - // Standard Error: 233 - .saturating_add(Weight::from_parts(1_156, 0).saturating_mul(b.into())) - // Standard Error: 2_470 - .saturating_add(Weight::from_parts(42_858, 0).saturating_mul(m.into())) - // Standard Error: 2_408 - .saturating_add(Weight::from_parts(185_822, 0).saturating_mul(p.into())) + // Minimum execution time: 57_367_000 picoseconds. 
+ Weight::from_parts(57_264_486, 6676) + // Standard Error: 141 + .saturating_add(Weight::from_parts(884, 0).saturating_mul(b.into())) + // Standard Error: 1_495 + .saturating_add(Weight::from_parts(57_869, 0).saturating_mul(m.into())) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(158_784, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -200,14 +200,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `708 + m * (96 ±0) + p * (36 ±0)` + // Measured: `675 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_287_000 picoseconds. - Weight::from_parts(44_144_056, 6676) - // Standard Error: 1_553 - .saturating_add(Weight::from_parts(50_224, 0).saturating_mul(m.into())) - // Standard Error: 1_534 - .saturating_add(Weight::from_parts(154_551, 0).saturating_mul(p.into())) + // Minimum execution time: 41_253_000 picoseconds. + Weight::from_parts(37_550_833, 6676) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(77_359, 0).saturating_mul(m.into())) + // Standard Error: 1_148 + .saturating_add(Weight::from_parts(153_523, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -230,16 +230,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `761 + m * (96 ±0) + p * (35 ±0)` + // Measured: `728 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_943_000 picoseconds. 
- Weight::from_parts(43_665_317, 6676) - // Standard Error: 164 - .saturating_add(Weight::from_parts(1_296, 0).saturating_mul(b.into())) - // Standard Error: 1_757 - .saturating_add(Weight::from_parts(35_145, 0).saturating_mul(m.into())) - // Standard Error: 1_694 - .saturating_add(Weight::from_parts(164_507, 0).saturating_mul(p.into())) + // Minimum execution time: 42_385_000 picoseconds. + Weight::from_parts(37_222_159, 6676) + // Standard Error: 118 + .saturating_add(Weight::from_parts(1_743, 0).saturating_mul(b.into())) + // Standard Error: 1_268 + .saturating_add(Weight::from_parts(59_743, 0).saturating_mul(m.into())) + // Standard Error: 1_222 + .saturating_add(Weight::from_parts(159_606, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -253,14 +253,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317` + // Measured: `284` // Estimated: `12362` - // Minimum execution time: 34_959_000 picoseconds. - Weight::from_parts(25_620_911, 12362) - // Standard Error: 1_457 - .saturating_add(Weight::from_parts(130_068, 0).saturating_mul(m.into())) - // Standard Error: 1_440 - .saturating_add(Weight::from_parts(113_433, 0).saturating_mul(z.into())) + // Minimum execution time: 31_184_000 picoseconds. + Weight::from_parts(22_860_208, 12362) + // Standard Error: 1_096 + .saturating_add(Weight::from_parts(129_834, 0).saturating_mul(m.into())) + // Standard Error: 1_083 + .saturating_add(Weight::from_parts(97_546, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -281,16 +281,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 50]`. 
fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (252 ±0)` + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 384_385_000 picoseconds. - Weight::from_parts(390_301_000, 12362) - // Standard Error: 32_391 - .saturating_add(Weight::from_parts(745_632, 0).saturating_mul(x.into())) - // Standard Error: 32_235 - .saturating_add(Weight::from_parts(758_118, 0).saturating_mul(y.into())) - // Standard Error: 64_412 - .saturating_add(Weight::from_parts(14_822_486, 0).saturating_mul(z.into())) + // Minimum execution time: 359_308_000 picoseconds. + Weight::from_parts(361_696_000, 12362) + // Standard Error: 30_917 + .saturating_add(Weight::from_parts(657_166, 0).saturating_mul(x.into())) + // Standard Error: 30_768 + .saturating_add(Weight::from_parts(670_249, 0).saturating_mul(y.into())) + // Standard Error: 61_480 + .saturating_add(Weight::from_parts(14_340_554, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -307,18 +307,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_042_000 picoseconds. - Weight::from_parts(6_385_000, 0) + // Minimum execution time: 6_146_000 picoseconds. + Weight::from_parts(6_540_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Alliance::Announcements` (r:1 w:1) /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `279` // Estimated: `10187` - // Minimum execution time: 10_152_000 picoseconds. 
- Weight::from_parts(10_728_000, 10187) + // Minimum execution time: 9_008_000 picoseconds. + Weight::from_parts(9_835_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -326,10 +326,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `385` + // Measured: `352` // Estimated: `10187` - // Minimum execution time: 11_540_000 picoseconds. - Weight::from_parts(12_160_000, 10187) + // Minimum execution time: 10_308_000 picoseconds. + Weight::from_parts(10_602_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,10 +343,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `534` + // Measured: `501` // Estimated: `18048` - // Minimum execution time: 46_932_000 picoseconds. - Weight::from_parts(48_549_000, 18048) + // Minimum execution time: 40_731_000 picoseconds. + Weight::from_parts(42_453_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -356,10 +356,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `433` + // Measured: `400` // Estimated: `18048` - // Minimum execution time: 29_716_000 picoseconds. - Weight::from_parts(30_911_000, 18048) + // Minimum execution time: 24_198_000 picoseconds. 
+ Weight::from_parts(25_258_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -373,10 +373,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `12362` - // Minimum execution time: 29_323_000 picoseconds. - Weight::from_parts(30_702_000, 12362) + // Minimum execution time: 24_509_000 picoseconds. + Weight::from_parts(25_490_000, 12362) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -392,10 +392,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `23734` - // Minimum execution time: 35_317_000 picoseconds. - Weight::from_parts(37_017_000, 23734) + // Minimum execution time: 30_889_000 picoseconds. + Weight::from_parts(31_930_000, 23734) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -409,10 +409,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `720` // Estimated: `6676` - // Minimum execution time: 43_741_000 picoseconds. - Weight::from_parts(45_035_000, 6676) + // Minimum execution time: 38_363_000 picoseconds. 
+ Weight::from_parts(39_428_000, 6676) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -430,10 +430,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `807` + // Measured: `774` // Estimated: `18048` - // Minimum execution time: 61_064_000 picoseconds. - Weight::from_parts(63_267_000, 18048) + // Minimum execution time: 60_717_000 picoseconds. + Weight::from_parts(61_785_000, 18048) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -445,14 +445,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `279` // Estimated: `27187` - // Minimum execution time: 5_117_000 picoseconds. - Weight::from_parts(5_371_000, 27187) - // Standard Error: 3_341 - .saturating_add(Weight::from_parts(1_210_414, 0).saturating_mul(n.into())) - // Standard Error: 1_308 - .saturating_add(Weight::from_parts(72_982, 0).saturating_mul(l.into())) + // Minimum execution time: 5_393_000 picoseconds. + Weight::from_parts(5_577_000, 27187) + // Standard Error: 3_099 + .saturating_add(Weight::from_parts(1_043_175, 0).saturating_mul(n.into())) + // Standard Error: 1_213 + .saturating_add(Weight::from_parts(71_633, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -466,12 +466,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 5_433_000 picoseconds. 
- Weight::from_parts(5_574_000, 27187) - // Standard Error: 193_236 - .saturating_add(Weight::from_parts(18_613_954, 0).saturating_mul(n.into())) - // Standard Error: 75_679 - .saturating_add(Weight::from_parts(221_928, 0).saturating_mul(l.into())) + // Minimum execution time: 5_318_000 picoseconds. + Weight::from_parts(5_581_000, 27187) + // Standard Error: 188_914 + .saturating_add(Weight::from_parts(17_878_267, 0).saturating_mul(n.into())) + // Standard Error: 73_987 + .saturating_add(Weight::from_parts(258_754, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -485,10 +485,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `18048` - // Minimum execution time: 34_613_000 picoseconds. - Weight::from_parts(35_866_000, 18048) + // Minimum execution time: 29_423_000 picoseconds. + Weight::from_parts(30_141_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -511,16 +511,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `721 + m * (32 ±0) + p * (36 ±0)` + // Measured: `688 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 36_770_000 picoseconds. 
- Weight::from_parts(39_685_981, 6676) - // Standard Error: 156 - .saturating_add(Weight::from_parts(588, 0).saturating_mul(b.into())) - // Standard Error: 1_636 - .saturating_add(Weight::from_parts(31_314, 0).saturating_mul(m.into())) - // Standard Error: 1_616 - .saturating_add(Weight::from_parts(158_254, 0).saturating_mul(p.into())) + // Minimum execution time: 31_545_000 picoseconds. + Weight::from_parts(33_432_774, 6676) + // Standard Error: 121 + .saturating_add(Weight::from_parts(232, 0).saturating_mul(b.into())) + // Standard Error: 1_263 + .saturating_add(Weight::from_parts(47_800, 0).saturating_mul(m.into())) + // Standard Error: 1_247 + .saturating_add(Weight::from_parts(188_655, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -533,12 +533,12 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1180 + m * (64 ±0)` + // Measured: `1147 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 36_851_000 picoseconds. - Weight::from_parts(38_427_277, 6676) - // Standard Error: 1_877 - .saturating_add(Weight::from_parts(50_131, 0).saturating_mul(m.into())) + // Minimum execution time: 30_462_000 picoseconds. + Weight::from_parts(31_639_466, 6676) + // Standard Error: 980 + .saturating_add(Weight::from_parts(60_075, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -557,14 +557,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `707 + m * (96 ±0) + p * (36 ±0)` + // Measured: `674 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 43_572_000 picoseconds. - Weight::from_parts(40_836_679, 6676) - // Standard Error: 1_764 - .saturating_add(Weight::from_parts(59_213, 0).saturating_mul(m.into())) - // Standard Error: 1_720 - .saturating_add(Weight::from_parts(171_689, 0).saturating_mul(p.into())) + // Minimum execution time: 40_765_000 picoseconds. + Weight::from_parts(37_690_472, 6676) + // Standard Error: 1_372 + .saturating_add(Weight::from_parts(69_441, 0).saturating_mul(m.into())) + // Standard Error: 1_338 + .saturating_add(Weight::from_parts(152_833, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -589,16 +589,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1287 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1254 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 62_758_000 picoseconds. - Weight::from_parts(63_400_227, 6676) - // Standard Error: 233 - .saturating_add(Weight::from_parts(1_156, 0).saturating_mul(b.into())) - // Standard Error: 2_470 - .saturating_add(Weight::from_parts(42_858, 0).saturating_mul(m.into())) - // Standard Error: 2_408 - .saturating_add(Weight::from_parts(185_822, 0).saturating_mul(p.into())) + // Minimum execution time: 57_367_000 picoseconds. 
+ Weight::from_parts(57_264_486, 6676) + // Standard Error: 141 + .saturating_add(Weight::from_parts(884, 0).saturating_mul(b.into())) + // Standard Error: 1_495 + .saturating_add(Weight::from_parts(57_869, 0).saturating_mul(m.into())) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(158_784, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -620,14 +620,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `708 + m * (96 ±0) + p * (36 ±0)` + // Measured: `675 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_287_000 picoseconds. - Weight::from_parts(44_144_056, 6676) - // Standard Error: 1_553 - .saturating_add(Weight::from_parts(50_224, 0).saturating_mul(m.into())) - // Standard Error: 1_534 - .saturating_add(Weight::from_parts(154_551, 0).saturating_mul(p.into())) + // Minimum execution time: 41_253_000 picoseconds. + Weight::from_parts(37_550_833, 6676) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(77_359, 0).saturating_mul(m.into())) + // Standard Error: 1_148 + .saturating_add(Weight::from_parts(153_523, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -650,16 +650,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `761 + m * (96 ±0) + p * (35 ±0)` + // Measured: `728 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_943_000 picoseconds. 
- Weight::from_parts(43_665_317, 6676) - // Standard Error: 164 - .saturating_add(Weight::from_parts(1_296, 0).saturating_mul(b.into())) - // Standard Error: 1_757 - .saturating_add(Weight::from_parts(35_145, 0).saturating_mul(m.into())) - // Standard Error: 1_694 - .saturating_add(Weight::from_parts(164_507, 0).saturating_mul(p.into())) + // Minimum execution time: 42_385_000 picoseconds. + Weight::from_parts(37_222_159, 6676) + // Standard Error: 118 + .saturating_add(Weight::from_parts(1_743, 0).saturating_mul(b.into())) + // Standard Error: 1_268 + .saturating_add(Weight::from_parts(59_743, 0).saturating_mul(m.into())) + // Standard Error: 1_222 + .saturating_add(Weight::from_parts(159_606, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -673,14 +673,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317` + // Measured: `284` // Estimated: `12362` - // Minimum execution time: 34_959_000 picoseconds. - Weight::from_parts(25_620_911, 12362) - // Standard Error: 1_457 - .saturating_add(Weight::from_parts(130_068, 0).saturating_mul(m.into())) - // Standard Error: 1_440 - .saturating_add(Weight::from_parts(113_433, 0).saturating_mul(z.into())) + // Minimum execution time: 31_184_000 picoseconds. + Weight::from_parts(22_860_208, 12362) + // Standard Error: 1_096 + .saturating_add(Weight::from_parts(129_834, 0).saturating_mul(m.into())) + // Standard Error: 1_083 + .saturating_add(Weight::from_parts(97_546, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -701,16 +701,16 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 50]`. 
fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (252 ±0)` + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 384_385_000 picoseconds. - Weight::from_parts(390_301_000, 12362) - // Standard Error: 32_391 - .saturating_add(Weight::from_parts(745_632, 0).saturating_mul(x.into())) - // Standard Error: 32_235 - .saturating_add(Weight::from_parts(758_118, 0).saturating_mul(y.into())) - // Standard Error: 64_412 - .saturating_add(Weight::from_parts(14_822_486, 0).saturating_mul(z.into())) + // Minimum execution time: 359_308_000 picoseconds. + Weight::from_parts(361_696_000, 12362) + // Standard Error: 30_917 + .saturating_add(Weight::from_parts(657_166, 0).saturating_mul(x.into())) + // Standard Error: 30_768 + .saturating_add(Weight::from_parts(670_249, 0).saturating_mul(y.into())) + // Standard Error: 61_480 + .saturating_add(Weight::from_parts(14_340_554, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -727,18 +727,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_042_000 picoseconds. - Weight::from_parts(6_385_000, 0) + // Minimum execution time: 6_146_000 picoseconds. + Weight::from_parts(6_540_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Alliance::Announcements` (r:1 w:1) /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `279` // Estimated: `10187` - // Minimum execution time: 10_152_000 picoseconds. 
- Weight::from_parts(10_728_000, 10187) + // Minimum execution time: 9_008_000 picoseconds. + Weight::from_parts(9_835_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,10 +746,10 @@ impl WeightInfo for () { /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `385` + // Measured: `352` // Estimated: `10187` - // Minimum execution time: 11_540_000 picoseconds. - Weight::from_parts(12_160_000, 10187) + // Minimum execution time: 10_308_000 picoseconds. + Weight::from_parts(10_602_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -763,10 +763,10 @@ impl WeightInfo for () { /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `534` + // Measured: `501` // Estimated: `18048` - // Minimum execution time: 46_932_000 picoseconds. - Weight::from_parts(48_549_000, 18048) + // Minimum execution time: 40_731_000 picoseconds. + Weight::from_parts(42_453_000, 18048) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -776,10 +776,10 @@ impl WeightInfo for () { /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `433` + // Measured: `400` // Estimated: `18048` - // Minimum execution time: 29_716_000 picoseconds. - Weight::from_parts(30_911_000, 18048) + // Minimum execution time: 24_198_000 picoseconds. 
+ Weight::from_parts(25_258_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -793,10 +793,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `12362` - // Minimum execution time: 29_323_000 picoseconds. - Weight::from_parts(30_702_000, 12362) + // Minimum execution time: 24_509_000 picoseconds. + Weight::from_parts(25_490_000, 12362) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -812,10 +812,10 @@ impl WeightInfo for () { /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `23734` - // Minimum execution time: 35_317_000 picoseconds. - Weight::from_parts(37_017_000, 23734) + // Minimum execution time: 30_889_000 picoseconds. + Weight::from_parts(31_930_000, 23734) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -829,10 +829,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `720` // Estimated: `6676` - // Minimum execution time: 43_741_000 picoseconds. - Weight::from_parts(45_035_000, 6676) + // Minimum execution time: 38_363_000 picoseconds. 
+ Weight::from_parts(39_428_000, 6676) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -850,10 +850,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `807` + // Measured: `774` // Estimated: `18048` - // Minimum execution time: 61_064_000 picoseconds. - Weight::from_parts(63_267_000, 18048) + // Minimum execution time: 60_717_000 picoseconds. + Weight::from_parts(61_785_000, 18048) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -865,14 +865,14 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `279` // Estimated: `27187` - // Minimum execution time: 5_117_000 picoseconds. - Weight::from_parts(5_371_000, 27187) - // Standard Error: 3_341 - .saturating_add(Weight::from_parts(1_210_414, 0).saturating_mul(n.into())) - // Standard Error: 1_308 - .saturating_add(Weight::from_parts(72_982, 0).saturating_mul(l.into())) + // Minimum execution time: 5_393_000 picoseconds. + Weight::from_parts(5_577_000, 27187) + // Standard Error: 3_099 + .saturating_add(Weight::from_parts(1_043_175, 0).saturating_mul(n.into())) + // Standard Error: 1_213 + .saturating_add(Weight::from_parts(71_633, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -886,12 +886,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 5_433_000 picoseconds. 
- Weight::from_parts(5_574_000, 27187) - // Standard Error: 193_236 - .saturating_add(Weight::from_parts(18_613_954, 0).saturating_mul(n.into())) - // Standard Error: 75_679 - .saturating_add(Weight::from_parts(221_928, 0).saturating_mul(l.into())) + // Minimum execution time: 5_318_000 picoseconds. + Weight::from_parts(5_581_000, 27187) + // Standard Error: 188_914 + .saturating_add(Weight::from_parts(17_878_267, 0).saturating_mul(n.into())) + // Standard Error: 73_987 + .saturating_add(Weight::from_parts(258_754, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -905,10 +905,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `543` + // Measured: `510` // Estimated: `18048` - // Minimum execution time: 34_613_000 picoseconds. - Weight::from_parts(35_866_000, 18048) + // Minimum execution time: 29_423_000 picoseconds. 
+ Weight::from_parts(30_141_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 8987e44ee000..10a118e95639 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -17,20 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } -sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml index ebd31bd296de..66333f973d7f 100644 --- a/substrate/frame/asset-conversion/ops/Cargo.toml +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -16,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } pallet-asset-conversion = { workspace = true } scale-info = { features = 
["derive"], workspace = true } -sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] diff --git a/substrate/frame/asset-conversion/ops/src/weights.rs b/substrate/frame/asset-conversion/ops/src/weights.rs index 65762bed72e2..9e7379c50156 100644 --- a/substrate/frame/asset-conversion/ops/src/weights.rs +++ b/substrate/frame/asset-conversion/ops/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_asset_conversion_ops` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_asset_conversion_ops -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/asset-conversion/ops/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion_ops +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/asset-conversion-ops/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -71,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn migrate_to_new_account() -> Weight { // Proof Size summary in bytes: - // Measured: `1796` + // Measured: `1762` // Estimated: `11426` - // Minimum execution time: 235_181_000 picoseconds. - Weight::from_parts(243_965_000, 11426) + // Minimum execution time: 223_850_000 picoseconds. + Weight::from_parts(231_676_000, 11426) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) } @@ -96,10 +94,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn migrate_to_new_account() -> Weight { // Proof Size summary in bytes: - // Measured: `1796` + // Measured: `1762` // Estimated: `11426` - // Minimum execution time: 235_181_000 picoseconds. - Weight::from_parts(243_965_000, 11426) + // Minimum execution time: 223_850_000 picoseconds. 
+ Weight::from_parts(231_676_000, 11426) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) } diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index dd7feb08f9f4..f6e025520d71 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -18,25 +18,24 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_asset_conversion -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion +// --chain=dev +// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/asset-conversion/src/weights.rs // --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs @@ -72,17 +71,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: 
Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::NextAssetId` (r:1 w:0) - /// Proof: `PoolAssets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `949` + // Measured: `910` // Estimated: `6360` - // Minimum execution time: 97_276_000 picoseconds. - Weight::from_parts(99_380_000, 6360) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -99,10 +96,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1546` + // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 153_723_000 picoseconds. - Weight::from_parts(155_774_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -120,8 +117,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 138_643_000 picoseconds. - Weight::from_parts(140_518_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. 
+ Weight::from_parts(134_350_000, 11426) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -134,10 +131,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 93_760_000 picoseconds. - Weight::from_parts(6_225_956, 990) - // Standard Error: 70_327 - .saturating_add(Weight::from_parts(45_209_796, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -151,10 +148,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 93_972_000 picoseconds. - Weight::from_parts(4_882_727, 990) - // Standard Error: 69_974 - .saturating_add(Weight::from_parts(45_961_057, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. + Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -174,12 +171,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 3]`. fn touch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1610` + // Measured: `1571` // Estimated: `6360` - // Minimum execution time: 56_011_000 picoseconds. 
- Weight::from_parts(59_515_373, 6360) - // Standard Error: 81_340 - .saturating_add(Weight::from_parts(19_186_821, 0).saturating_mul(n.into())) + // Minimum execution time: 45_757_000 picoseconds. + Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) } @@ -197,17 +194,15 @@ impl WeightInfo for () { /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::NextAssetId` (r:1 w:0) - /// Proof: `PoolAssets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `949` + // Measured: `910` // Estimated: `6360` - // Minimum execution time: 97_276_000 picoseconds. - Weight::from_parts(99_380_000, 6360) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -224,10 +219,10 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1546` + // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 153_723_000 picoseconds. 
- Weight::from_parts(155_774_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -245,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 138_643_000 picoseconds. - Weight::from_parts(140_518_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. + Weight::from_parts(134_350_000, 11426) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -259,10 +254,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 93_760_000 picoseconds. - Weight::from_parts(6_225_956, 990) - // Standard Error: 70_327 - .saturating_add(Weight::from_parts(45_209_796, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -276,10 +271,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 93_972_000 picoseconds. - Weight::from_parts(4_882_727, 990) - // Standard Error: 69_974 - .saturating_add(Weight::from_parts(45_961_057, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. 
+ Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -299,12 +294,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 3]`. fn touch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1610` + // Measured: `1571` // Estimated: `6360` - // Minimum execution time: 56_011_000 picoseconds. - Weight::from_parts(59_515_373, 6360) - // Standard Error: 81_340 - .saturating_add(Weight::from_parts(19_186_821, 0).saturating_mul(n.into())) + // Minimum execution time: 45_757_000 picoseconds. + Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) } diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 01a5ca21b199..514b6fa40c2b 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -sp-core = { optional = true, workspace = true } sp-runtime = { workspace = true } +sp-core = { optional = true, workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -sp-core = { workspace = true } sp-io = { workspace = true, default-features = true 
} +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/asset-rate/src/weights.rs b/substrate/frame/asset-rate/src/weights.rs index c1991dc4ebb2..fb577b618b33 100644 --- a/substrate/frame/asset-rate/src/weights.rs +++ b/substrate/frame/asset-rate/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_asset_rate` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -60,35 +60,35 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `3502` - // Minimum execution time: 10_361_000 picoseconds. - Weight::from_parts(10_757_000, 3502) + // Estimated: `3501` + // Minimum execution time: 9_816_000 picoseconds. 
+ Weight::from_parts(10_076_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3502` - // Minimum execution time: 11_193_000 picoseconds. - Weight::from_parts(11_625_000, 3502) + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 10_164_000 picoseconds. + Weight::from_parts(10_598_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3502` - // Minimum execution time: 11_941_000 picoseconds. - Weight::from_parts(12_440_000, 3502) + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 10_837_000 picoseconds. + Weight::from_parts(11_050_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -97,35 +97,35 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. 
impl WeightInfo for () { /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `3502` - // Minimum execution time: 10_361_000 picoseconds. - Weight::from_parts(10_757_000, 3502) + // Estimated: `3501` + // Minimum execution time: 9_816_000 picoseconds. + Weight::from_parts(10_076_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3502` - // Minimum execution time: 11_193_000 picoseconds. - Weight::from_parts(11_625_000, 3502) + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 10_164_000 picoseconds. + Weight::from_parts(10_598_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3502` - // Minimum execution time: 11_941_000 picoseconds. 
- Weight::from_parts(12_440_000, 3502) + // Measured: `137` + // Estimated: `3501` + // Minimum execution time: 10_837_000 picoseconds. + Weight::from_parts(11_050_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/assets-freezer/Cargo.toml b/substrate/frame/assets-freezer/Cargo.toml index 3fffa4d0627f..68bfdd7cfb62 100644 --- a/substrate/frame/assets-freezer/Cargo.toml +++ b/substrate/frame/assets-freezer/Cargo.toml @@ -16,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-assets = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } +sp-core = { workspace = true } +pallet-balances = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index a062a68d4220..e20b576d0836 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -25,13 +25,13 @@ sp-runtime = { workspace = true } # Needed for type-safe access to storage DB. frame-support = { workspace = true } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-benchmarking = { optional = true, workspace = true } frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } sp-core = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index a9b0dc950a61..e909932bfc82 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -275,7 +275,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::*; - use frame_support::derive_impl; + use frame_support::{derive_impl, traits::ConstU64}; pub struct TestDefaultConfig; #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] @@ -289,11 +289,11 @@ pub mod pallet { type RemoveItemsLimit = ConstU32<5>; type AssetId = u32; type AssetIdParameter = u32; - type AssetDeposit = ConstUint<1>; - type AssetAccountDeposit = ConstUint<10>; - type MetadataDepositBase = ConstUint<1>; - type MetadataDepositPerByte = ConstUint<1>; - type ApprovalDeposit = ConstUint<1>; + type AssetDeposit = ConstU64<1>; + type AssetAccountDeposit = ConstU64<10>; + type MetadataDepositBase = ConstU64<1>; + type MetadataDepositPerByte = ConstU64<1>; + type ApprovalDeposit = ConstU64<1>; type StringLimit = ConstU32<50>; type Extra = (); type CallbackHandle = (); diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs index 09997bc9d719..57f7e951b73c 100644 --- a/substrate/frame/assets/src/weights.rs +++ b/substrate/frame/assets/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_assets` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -91,30 +91,26 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 33_908_000 picoseconds. - Weight::from_parts(37_126_000, 3675) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 26_165_000 picoseconds. + Weight::from_parts(26_838_000, 3675) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_105_000 picoseconds. 
- Weight::from_parts(13_348_000, 3675) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 11_152_000 picoseconds. + Weight::from_parts(11_624_000, 3675) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) @@ -123,8 +119,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 17_478_000 picoseconds. - Weight::from_parts(17_964_000, 3675) + // Minimum execution time: 11_961_000 picoseconds. + Weight::from_parts(12_408_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -137,12 +133,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `71 + c * (208 ±0)` + // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 20_846_000 picoseconds. - Weight::from_parts(21_195_000, 3675) - // Standard Error: 13_008 - .saturating_add(Weight::from_parts(15_076_064, 0).saturating_mul(c.into())) + // Minimum execution time: 15_815_000 picoseconds. + Weight::from_parts(16_370_000, 3675) + // Standard Error: 7_448 + .saturating_add(Weight::from_parts(13_217_179, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -158,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 21_340_000 picoseconds. 
- Weight::from_parts(21_916_000, 3675) - // Standard Error: 8_545 - .saturating_add(Weight::from_parts(15_868_375, 0).saturating_mul(a.into())) + // Minimum execution time: 16_791_000 picoseconds. + Weight::from_parts(17_066_000, 3675) + // Standard Error: 7_163 + .saturating_add(Weight::from_parts(14_436_592, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -176,8 +172,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 18_110_000 picoseconds. - Weight::from_parts(18_512_000, 3675) + // Minimum execution time: 12_769_000 picoseconds. + Weight::from_parts(13_097_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -189,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 27_639_000 picoseconds. - Weight::from_parts(28_680_000, 3675) + // Minimum execution time: 22_539_000 picoseconds. + Weight::from_parts(23_273_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -202,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 36_011_000 picoseconds. - Weight::from_parts(37_095_000, 3675) + // Minimum execution time: 30_885_000 picoseconds. + Weight::from_parts(31_800_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -217,8 +213,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 48_531_000 picoseconds. 
- Weight::from_parts(50_508_000, 6208) + // Minimum execution time: 43_618_000 picoseconds. + Weight::from_parts(44_794_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -232,8 +228,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 44_754_000 picoseconds. - Weight::from_parts(45_999_000, 6208) + // Minimum execution time: 39_174_000 picoseconds. + Weight::from_parts(40_059_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -247,8 +243,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 48_407_000 picoseconds. - Weight::from_parts(49_737_000, 6208) + // Minimum execution time: 43_963_000 picoseconds. + Weight::from_parts(44_995_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -260,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_827_000 picoseconds. - Weight::from_parts(22_616_000, 3675) + // Minimum execution time: 15_853_000 picoseconds. + Weight::from_parts(16_414_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -273,8 +269,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_579_000 picoseconds. - Weight::from_parts(22_406_000, 3675) + // Minimum execution time: 15_925_000 picoseconds. 
+ Weight::from_parts(16_449_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -284,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 16_754_000 picoseconds. - Weight::from_parts(17_556_000, 3675) + // Minimum execution time: 11_629_000 picoseconds. + Weight::from_parts(12_138_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -295,8 +291,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 16_602_000 picoseconds. - Weight::from_parts(17_551_000, 3675) + // Minimum execution time: 11_653_000 picoseconds. + Weight::from_parts(12_058_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -308,8 +304,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 18_231_000 picoseconds. - Weight::from_parts(18_899_000, 3675) + // Minimum execution time: 13_292_000 picoseconds. + Weight::from_parts(13_686_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -319,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_396_000 picoseconds. - Weight::from_parts(16_937_000, 3675) + // Minimum execution time: 11_805_000 picoseconds. 
+ Weight::from_parts(12_060_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,12 +326,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 31_604_000 picoseconds. - Weight::from_parts(33_443_707, 3675) + // Minimum execution time: 26_289_000 picoseconds. + Weight::from_parts(27_543_545, 3675) + // Standard Error: 939 + .saturating_add(Weight::from_parts(4_967, 0).saturating_mul(n.into())) + // Standard Error: 939 + .saturating_add(Weight::from_parts(3_698, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -347,8 +347,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 32_152_000 picoseconds. - Weight::from_parts(32_893_000, 3675) + // Minimum execution time: 27_560_000 picoseconds. + Weight::from_parts(28_541_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -362,12 +362,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 13_637_000 picoseconds. - Weight::from_parts(14_385_881, 3675) - // Standard Error: 375 - .saturating_add(Weight::from_parts(1_821, 0).saturating_mul(n.into())) - // Standard Error: 375 - .saturating_add(Weight::from_parts(147, 0).saturating_mul(s.into())) + // Minimum execution time: 12_378_000 picoseconds. 
+ Weight::from_parts(13_057_891, 3675) + // Standard Error: 474 + .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(n.into())) + // Standard Error: 474 + .saturating_add(Weight::from_parts(2_387, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -379,8 +379,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 31_587_000 picoseconds. - Weight::from_parts(32_438_000, 3675) + // Minimum execution time: 27_134_000 picoseconds. + Weight::from_parts(28_333_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -390,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_006_000 picoseconds. - Weight::from_parts(16_623_000, 3675) + // Minimum execution time: 11_524_000 picoseconds. + Weight::from_parts(11_934_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -403,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 36_026_000 picoseconds. - Weight::from_parts(37_023_000, 3675) + // Minimum execution time: 30_206_000 picoseconds. + Weight::from_parts(31_624_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -420,8 +420,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 68_731_000 picoseconds. - Weight::from_parts(70_171_000, 6208) + // Minimum execution time: 64_074_000 picoseconds. 
+ Weight::from_parts(66_145_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -433,8 +433,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_039_000 picoseconds. - Weight::from_parts(39_018_000, 3675) + // Minimum execution time: 32_790_000 picoseconds. + Weight::from_parts(33_634_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -446,8 +446,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_056_000 picoseconds. - Weight::from_parts(39_228_000, 3675) + // Minimum execution time: 33_150_000 picoseconds. + Weight::from_parts(34_440_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -457,8 +457,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_653_000 picoseconds. - Weight::from_parts(17_240_000, 3675) + // Minimum execution time: 12_365_000 picoseconds. + Weight::from_parts(12_870_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -472,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 37_938_000 picoseconds. - Weight::from_parts(38_960_000, 3675) + // Minimum execution time: 32_308_000 picoseconds. + Weight::from_parts(33_080_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -485,8 +485,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 35_210_000 picoseconds. 
- Weight::from_parts(36_222_000, 3675) + // Minimum execution time: 29_870_000 picoseconds. + Weight::from_parts(30_562_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -500,8 +500,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 36_787_000 picoseconds. - Weight::from_parts(38_229_000, 3675) + // Minimum execution time: 31_980_000 picoseconds. + Weight::from_parts(33_747_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -513,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 34_185_000 picoseconds. - Weight::from_parts(35_456_000, 3675) + // Minimum execution time: 29_599_000 picoseconds. + Weight::from_parts(30_919_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -526,25 +526,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_482_000 picoseconds. - Weight::from_parts(22_135_000, 3675) + // Minimum execution time: 15_741_000 picoseconds. 
+ Weight::from_parts(16_558_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `498` - // Estimated: `6208` - // Minimum execution time: 58_108_000 picoseconds. - Weight::from_parts(59_959_000, 6208) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 46_573_000 picoseconds. + Weight::from_parts(47_385_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -552,30 +547,26 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 33_908_000 picoseconds. - Weight::from_parts(37_126_000, 3675) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 26_165_000 picoseconds. 
+ Weight::from_parts(26_838_000, 3675) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_105_000 picoseconds. - Weight::from_parts(13_348_000, 3675) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 11_152_000 picoseconds. + Weight::from_parts(11_624_000, 3675) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) @@ -584,8 +575,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 17_478_000 picoseconds. - Weight::from_parts(17_964_000, 3675) + // Minimum execution time: 11_961_000 picoseconds. + Weight::from_parts(12_408_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -598,12 +589,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `71 + c * (208 ±0)` + // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 20_846_000 picoseconds. - Weight::from_parts(21_195_000, 3675) - // Standard Error: 13_008 - .saturating_add(Weight::from_parts(15_076_064, 0).saturating_mul(c.into())) + // Minimum execution time: 15_815_000 picoseconds. 
+ Weight::from_parts(16_370_000, 3675) + // Standard Error: 7_448 + .saturating_add(Weight::from_parts(13_217_179, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -619,10 +610,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 21_340_000 picoseconds. - Weight::from_parts(21_916_000, 3675) - // Standard Error: 8_545 - .saturating_add(Weight::from_parts(15_868_375, 0).saturating_mul(a.into())) + // Minimum execution time: 16_791_000 picoseconds. + Weight::from_parts(17_066_000, 3675) + // Standard Error: 7_163 + .saturating_add(Weight::from_parts(14_436_592, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -637,8 +628,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 18_110_000 picoseconds. - Weight::from_parts(18_512_000, 3675) + // Minimum execution time: 12_769_000 picoseconds. + Weight::from_parts(13_097_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -650,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 27_639_000 picoseconds. - Weight::from_parts(28_680_000, 3675) + // Minimum execution time: 22_539_000 picoseconds. 
+ Weight::from_parts(23_273_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -663,8 +654,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 36_011_000 picoseconds. - Weight::from_parts(37_095_000, 3675) + // Minimum execution time: 30_885_000 picoseconds. + Weight::from_parts(31_800_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -678,8 +669,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 48_531_000 picoseconds. - Weight::from_parts(50_508_000, 6208) + // Minimum execution time: 43_618_000 picoseconds. + Weight::from_parts(44_794_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -693,8 +684,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 44_754_000 picoseconds. - Weight::from_parts(45_999_000, 6208) + // Minimum execution time: 39_174_000 picoseconds. + Weight::from_parts(40_059_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -708,8 +699,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 48_407_000 picoseconds. - Weight::from_parts(49_737_000, 6208) + // Minimum execution time: 43_963_000 picoseconds. + Weight::from_parts(44_995_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -721,8 +712,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_827_000 picoseconds. 
- Weight::from_parts(22_616_000, 3675) + // Minimum execution time: 15_853_000 picoseconds. + Weight::from_parts(16_414_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,8 +725,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_579_000 picoseconds. - Weight::from_parts(22_406_000, 3675) + // Minimum execution time: 15_925_000 picoseconds. + Weight::from_parts(16_449_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -745,8 +736,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 16_754_000 picoseconds. - Weight::from_parts(17_556_000, 3675) + // Minimum execution time: 11_629_000 picoseconds. + Weight::from_parts(12_138_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -756,8 +747,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 16_602_000 picoseconds. - Weight::from_parts(17_551_000, 3675) + // Minimum execution time: 11_653_000 picoseconds. + Weight::from_parts(12_058_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -769,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 18_231_000 picoseconds. - Weight::from_parts(18_899_000, 3675) + // Minimum execution time: 13_292_000 picoseconds. 
+ Weight::from_parts(13_686_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -780,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_396_000 picoseconds. - Weight::from_parts(16_937_000, 3675) + // Minimum execution time: 11_805_000 picoseconds. + Weight::from_parts(12_060_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -791,12 +782,16 @@ impl WeightInfo for () { /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 31_604_000 picoseconds. - Weight::from_parts(33_443_707, 3675) + // Minimum execution time: 26_289_000 picoseconds. + Weight::from_parts(27_543_545, 3675) + // Standard Error: 939 + .saturating_add(Weight::from_parts(4_967, 0).saturating_mul(n.into())) + // Standard Error: 939 + .saturating_add(Weight::from_parts(3_698, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -808,8 +803,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 32_152_000 picoseconds. - Weight::from_parts(32_893_000, 3675) + // Minimum execution time: 27_560_000 picoseconds. 
+ Weight::from_parts(28_541_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -823,12 +818,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 13_637_000 picoseconds. - Weight::from_parts(14_385_881, 3675) - // Standard Error: 375 - .saturating_add(Weight::from_parts(1_821, 0).saturating_mul(n.into())) - // Standard Error: 375 - .saturating_add(Weight::from_parts(147, 0).saturating_mul(s.into())) + // Minimum execution time: 12_378_000 picoseconds. + Weight::from_parts(13_057_891, 3675) + // Standard Error: 474 + .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(n.into())) + // Standard Error: 474 + .saturating_add(Weight::from_parts(2_387, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -840,8 +835,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 31_587_000 picoseconds. - Weight::from_parts(32_438_000, 3675) + // Minimum execution time: 27_134_000 picoseconds. + Weight::from_parts(28_333_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -851,8 +846,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_006_000 picoseconds. - Weight::from_parts(16_623_000, 3675) + // Minimum execution time: 11_524_000 picoseconds. + Weight::from_parts(11_934_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -864,8 +859,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 36_026_000 picoseconds. 
- Weight::from_parts(37_023_000, 3675) + // Minimum execution time: 30_206_000 picoseconds. + Weight::from_parts(31_624_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -881,8 +876,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 68_731_000 picoseconds. - Weight::from_parts(70_171_000, 6208) + // Minimum execution time: 64_074_000 picoseconds. + Weight::from_parts(66_145_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -894,8 +889,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_039_000 picoseconds. - Weight::from_parts(39_018_000, 3675) + // Minimum execution time: 32_790_000 picoseconds. + Weight::from_parts(33_634_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -907,8 +902,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 38_056_000 picoseconds. - Weight::from_parts(39_228_000, 3675) + // Minimum execution time: 33_150_000 picoseconds. + Weight::from_parts(34_440_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -918,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 16_653_000 picoseconds. - Weight::from_parts(17_240_000, 3675) + // Minimum execution time: 12_365_000 picoseconds. 
+ Weight::from_parts(12_870_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -933,8 +928,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 37_938_000 picoseconds. - Weight::from_parts(38_960_000, 3675) + // Minimum execution time: 32_308_000 picoseconds. + Weight::from_parts(33_080_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -946,8 +941,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 35_210_000 picoseconds. - Weight::from_parts(36_222_000, 3675) + // Minimum execution time: 29_870_000 picoseconds. + Weight::from_parts(30_562_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -961,8 +956,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 36_787_000 picoseconds. - Weight::from_parts(38_229_000, 3675) + // Minimum execution time: 31_980_000 picoseconds. + Weight::from_parts(33_747_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -974,8 +969,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 34_185_000 picoseconds. - Weight::from_parts(35_456_000, 3675) + // Minimum execution time: 29_599_000 picoseconds. + Weight::from_parts(30_919_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -987,24 +982,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 21_482_000 picoseconds. 
- Weight::from_parts(22_135_000, 3675) + // Minimum execution time: 15_741_000 picoseconds. + Weight::from_parts(16_558_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `498` - // Estimated: `6208` - // Minimum execution time: 58_108_000 picoseconds. - Weight::from_parts(59_959_000, 6208) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 46_573_000 picoseconds. 
+ Weight::from_parts(47_385_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 785bfee71b68..db89a58da8f0 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -17,8 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } @@ -27,11 +31,17 @@ pallet-balances = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", - "frame/std", + "frame-support/std", + "frame-system/std", "pallet-balances/std", "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", ] try-runtime = [ - "frame/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", "pallet-balances/try-runtime", + "sp-runtime/try-runtime", ] diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs index 9521f20fe009..c3010f5c9c03 100644 --- a/substrate/frame/atomic-swap/src/lib.rs +++ b/substrate/frame/atomic-swap/src/lib.rs @@ -50,11 +50,17 @@ use core::{ marker::PhantomData, ops::{Deref, DerefMut}, }; -use frame::{ - prelude::*, - traits::{BalanceStatus, Currency, ReservableCurrency}, +use frame_support::{ + dispatch::DispatchResult, + pallet_prelude::MaxEncodedLen, + traits::{BalanceStatus, Currency, Get, ReservableCurrency}, + weights::Weight, + RuntimeDebugNoBound, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; +use 
sp_runtime::RuntimeDebug; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -153,9 +159,11 @@ where pub use pallet::*; -#[frame::pallet] +#[frame_support::pallet] pub mod pallet { use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; /// Atomic swap's pallet configuration trait. #[pallet::config] diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 6fcc5571a523..47ebe6a8f0ac 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -19,11 +19,13 @@ use super::*; use crate as pallet_atomic_swap; -use frame::testing_prelude::*; + +use frame_support::{derive_impl, traits::ConstU32}; +use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; -construct_runtime!( +frame_support::construct_runtime!( pub enum Test { System: frame_system, @@ -52,7 +54,7 @@ impl Config for Test { const A: u64 = 1; const B: u64 = 2; -pub fn new_test_ext() -> TestExternalities { +pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let genesis = pallet_balances::GenesisConfig:: { balances: vec![(A, 100), (B, 200)] }; genesis.assimilate_storage(&mut t).unwrap(); diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 94a47e4d96cd..94b057d665d4 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-timestamp = { workspace = true } -scale-info = { features = ["derive"], workspace = true } 
sp-application-crypto = { workspace = true } sp-consensus-aura = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index 506c292c837b..01f574a262ad 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-session = { features = [ "historical", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-application-crypto = { workspace = true } sp-authority-discovery = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index f8b587d44909..74a4a93147a8 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } impl-trait-for-tuples = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs index 5c969a3480d4..1de2262a2014 100644 --- a/substrate/frame/authorship/src/lib.rs +++ b/substrate/frame/authorship/src/lib.rs @@ -67,7 +67,6 @@ pub mod pallet { } #[pallet::storage] - #[pallet::whitelist_storage] /// Author of current block. 
pub(super) type Author = StorageValue<_, T::AccountId, OptionQuery>; } diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index 8673e08472eb..f0a7f4648c0a 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -17,14 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 33e275fcb5e3..6b0e31e84718 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -20,16 +20,14 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::benchmarks; type Header = sp_runtime::generic::Header; -#[benchmarks] -mod benchmarks { - use super::*; +benchmarks! { + check_equivocation_proof { + let x in 0 .. 1; - #[benchmark] - fn check_equivocation_proof(x: Linear<0, 1>) { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output is not deterministic since keys are generated randomly (and therefore // signature content changes). it should not affect the benchmark. 
@@ -55,21 +53,22 @@ mod benchmarks { 124, 11, 167, 227, 103, 88, 78, 23, 228, 33, 96, 41, 207, 183, 227, 189, 114, 70, 254, 30, 128, 243, 233, 83, 214, 45, 74, 182, 120, 119, 64, 243, 219, 119, 63, 240, 205, 123, 231, 82, 205, 174, 143, 70, 2, 86, 182, 20, 16, 141, 145, 91, 116, 195, 58, 223, - 175, 145, 255, 7, 121, 133, + 175, 145, 255, 7, 121, 133 ]; let equivocation_proof1: sp_consensus_babe::EquivocationProof

= Decode::decode(&mut &EQUIVOCATION_PROOF_BLOB[..]).unwrap(); let equivocation_proof2 = equivocation_proof1.clone(); - - #[block] - { - sp_consensus_babe::check_equivocation_proof::
(equivocation_proof1); - } - + }: { + sp_consensus_babe::check_equivocation_proof::
(equivocation_proof1); + } verify { assert!(sp_consensus_babe::check_equivocation_proof::
(equivocation_proof2)); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(3), crate::mock::Test,); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 23857470adc4..c2e24c73a7bd 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -239,7 +239,7 @@ pub fn start_session(session_index: SessionIndex) { /// Progress to the first block at the given era pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); + assert_eq!(Staking::current_era(), Some(era_index)); } pub fn make_primary_pre_digest( diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index 5210d9289bcd..eca958160239 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -414,7 +414,7 @@ fn disabled_validators_cannot_author_blocks() { // so we should still be able to author blocks start_era(2); - assert_eq!(pallet_staking::CurrentEra::::get().unwrap(), 2); + assert_eq!(Staking::current_era().unwrap(), 2); // let's disable the validator at index 0 Session::disable_index(0); diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 6b1c4809f773..647f5d26686a 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -27,14 +27,14 @@ scale-info = { features = [ sp-runtime = { workspace = true } # FRAME -frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +frame-election-provider-support = { workspace = true } # third party -aquamarine = { workspace = true } -docify = { workspace = true } log = { workspace = true } +docify = { workspace = true } +aquamarine = { workspace = true } # Optional imports for benchmarking 
frame-benchmarking = { optional = true, workspace = true } @@ -44,12 +44,12 @@ sp-io = { optional = true, workspace = true } sp-tracing = { optional = true, workspace = true } [dev-dependencies] -frame-benchmarking = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index db46bc6fe446..b52fc8848237 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -13,10 +13,10 @@ publish = false workspace = true [dependencies] -frame-election-provider-support = { features = ["fuzz"], workspace = true, default-features = true } honggfuzz = { workspace = true } -pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } rand = { features = ["small_rng", "std"], workspace = true, default-features = true } +frame-election-provider-support = { features = ["fuzz"], workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index 99b203e73fb0..12d61b61c06d 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # 
frame +pallet-staking = { workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } # core +sp-storage = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } -sp-storage = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } # utils remote-externalities = { workspace = true, default-features = true } diff --git a/substrate/frame/bags-list/src/weights.rs b/substrate/frame/bags-list/src/weights.rs index 52218277a795..8a5424881e97 100644 --- a/substrate/frame/bags-list/src/weights.rs +++ b/substrate/frame/bags-list/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_bags_list` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -69,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1785` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 69_033_000 picoseconds. - Weight::from_parts(71_551_000, 11506) + // Minimum execution time: 60_062_000 picoseconds. + Weight::from_parts(62_341_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -86,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1679` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 66_157_000 picoseconds. - Weight::from_parts(69_215_000, 8877) + // Minimum execution time: 57_585_000 picoseconds. + Weight::from_parts(59_480_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -105,10 +105,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1991` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 79_581_000 picoseconds. - Weight::from_parts(81_999_000, 11506) + // Minimum execution time: 69_552_000 picoseconds. 
+ Weight::from_parts(71_211_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -126,10 +126,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1785` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 69_033_000 picoseconds. - Weight::from_parts(71_551_000, 11506) + // Minimum execution time: 60_062_000 picoseconds. + Weight::from_parts(62_341_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -143,10 +143,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1679` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 66_157_000 picoseconds. - Weight::from_parts(69_215_000, 8877) + // Minimum execution time: 57_585_000 picoseconds. + Weight::from_parts(59_480_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -162,10 +162,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1991` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 79_581_000 picoseconds. - Weight::from_parts(81_999_000, 11506) + // Minimum execution time: 69_552_000 picoseconds. 
+ Weight::from_parts(71_211_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 03bc7fcb3fcc..f0117555c37e 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -17,20 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } +docify = { workspace = true } [dev-dependencies] -frame-support = { features = ["experimental"], workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 9d7401452101..65e594a904f9 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -205,7 +205,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. 
pub mod config_preludes { use super::*; - use frame_support::derive_impl; + use frame_support::{derive_impl, traits::ConstU64}; pub struct TestDefaultConfig; @@ -222,7 +222,7 @@ pub mod pallet { type RuntimeFreezeReason = (); type Balance = u64; - type ExistentialDeposit = ConstUint<1>; + type ExistentialDeposit = ConstU64<1>; type ReserveIdentifier = (); type FreezeIdentifier = Self::RuntimeFreezeReason; diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 5ad818e5bfa2..7fcc49d50aa5 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -265,7 +265,6 @@ fn lock_should_work_reserve() { CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, - 0, ) .is_err()); assert!(ChargeTransactionPayment::::validate_and_prepare( @@ -274,7 +273,6 @@ fn lock_should_work_reserve() { CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, - 0, ) .is_err()); }); @@ -298,7 +296,6 @@ fn lock_should_work_tx_fee() { CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, - 0, ) .is_err()); assert!(ChargeTransactionPayment::::validate_and_prepare( @@ -307,7 +304,6 @@ fn lock_should_work_tx_fee() { CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, - 0, ) .is_err()); }); diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index 0c7a1354cda0..55decef273f6 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,29 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 +//! DATE: 2024-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `8f4ffe8f7785`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* +// --runtime=target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/balances/src/weights.rs // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --output=./substrate/frame/balances/src/weights.rs -// --header=./substrate/HEADER-APACHE2 -// --template=./substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 50_023_000 picoseconds. - Weight::from_parts(51_105_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. 
+ Weight::from_parts(77_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,10 +80,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 39_923_000 picoseconds. - Weight::from_parts(40_655_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -93,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 15_062_000 picoseconds. - Weight::from_parts(15_772_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 21_797_000 picoseconds. - Weight::from_parts(22_287_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. 
+ Weight::from_parts(31_293_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -115,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `155` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 51_425_000 picoseconds. - Weight::from_parts(52_600_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,10 +124,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 49_399_000 picoseconds. - Weight::from_parts(51_205_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -137,10 +135,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 18_119_000 picoseconds. - Weight::from_parts(18_749_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. 
+ Weight::from_parts(25_026_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -151,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_783_000 picoseconds. - Weight::from_parts(17_076_000, 990) - // Standard Error: 15_126 - .saturating_add(Weight::from_parts(14_834_157, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -163,22 +161,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_048_000 picoseconds. - Weight::from_parts(6_346_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_215_000 picoseconds. - Weight::from_parts(30_848_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_813_000 picoseconds. - Weight::from_parts(21_553_000, 0) + // Minimum execution time: 31_141_000 picoseconds. 
+ Weight::from_parts(31_917_000, 0) } } @@ -188,10 +186,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 50_023_000 picoseconds. - Weight::from_parts(51_105_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. + Weight::from_parts(77_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -199,10 +197,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 39_923_000 picoseconds. - Weight::from_parts(40_655_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -210,10 +208,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 15_062_000 picoseconds. - Weight::from_parts(15_772_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. 
+ Weight::from_parts(19_802_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -221,10 +219,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 21_797_000 picoseconds. - Weight::from_parts(22_287_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. + Weight::from_parts(31_293_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -232,10 +230,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `155` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 51_425_000 picoseconds. - Weight::from_parts(52_600_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -243,10 +241,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `0` // Estimated: `3593` - // Minimum execution time: 49_399_000 picoseconds. - Weight::from_parts(51_205_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. 
+ Weight::from_parts(76_817_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -254,10 +252,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 18_119_000 picoseconds. - Weight::from_parts(18_749_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -268,10 +266,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_783_000 picoseconds. - Weight::from_parts(17_076_000, 990) - // Standard Error: 15_126 - .saturating_add(Weight::from_parts(14_834_157, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -280,21 +278,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_048_000 picoseconds. - Weight::from_parts(6_346_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_215_000 picoseconds. 
- Weight::from_parts(30_848_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_813_000 picoseconds. - Weight::from_parts(21_553_000, 0) + // Minimum execution time: 31_141_000 picoseconds. + Weight::from_parts(31_917_000, 0) } } diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 54343bb9ce51..d67ac20ee922 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -13,22 +13,22 @@ workspace = true [dependencies] array-bytes = { optional = true, workspace = true, default-features = true } -binary-merkle-tree = { workspace = true } codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } +binary-merkle-tree = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-beefy = { workspace = true } pallet-mmr = { workspace = true } pallet-session = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } -sp-api = { workspace = true } sp-consensus-beefy = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-api = { workspace = true } sp-state-machine = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/beefy-mmr/src/weights.rs b/substrate/frame/beefy-mmr/src/weights.rs index dcfdb560ee94..c292f25400cc 100644 --- a/substrate/frame/beefy-mmr/src/weights.rs +++ b/substrate/frame/beefy-mmr/src/weights.rs @@ -18,27 +18,25 @@ //! 
Autogenerated weights for `pallet_beefy_mmr` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_beefy_mmr -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/beefy-mmr/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_beefy_mmr +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/beefy-mmr/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -63,20 +61,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn extract_validation_context() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_687_000 picoseconds. - Weight::from_parts(6_939_000, 3509) + // Minimum execution time: 7_461_000 picoseconds. 
+ Weight::from_parts(7_669_000, 3509) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Mmr::Nodes` (r:1 w:0) /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn read_peak() -> Weight { // Proof Size summary in bytes: - // Measured: `386` + // Measured: `333` // Estimated: `3505` - // Minimum execution time: 10_409_000 picoseconds. - Weight::from_parts(10_795_000, 3505) + // Minimum execution time: 6_137_000 picoseconds. + Weight::from_parts(6_423_000, 3505) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Mmr::RootHash` (r:1 w:0) @@ -86,12 +84,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[2, 512]`. fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `378` + // Measured: `325` // Estimated: `1517` - // Minimum execution time: 15_459_000 picoseconds. - Weight::from_parts(21_963_366, 1517) - // Standard Error: 1_528 - .saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into())) + // Minimum execution time: 10_687_000 picoseconds. + Weight::from_parts(14_851_626, 1517) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -102,20 +100,20 @@ impl WeightInfo for () { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn extract_validation_context() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_687_000 picoseconds. - Weight::from_parts(6_939_000, 3509) + // Minimum execution time: 7_461_000 picoseconds. 
+ Weight::from_parts(7_669_000, 3509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Mmr::Nodes` (r:1 w:0) /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn read_peak() -> Weight { // Proof Size summary in bytes: - // Measured: `386` + // Measured: `333` // Estimated: `3505` - // Minimum execution time: 10_409_000 picoseconds. - Weight::from_parts(10_795_000, 3505) + // Minimum execution time: 6_137_000 picoseconds. + Weight::from_parts(6_423_000, 3505) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Mmr::RootHash` (r:1 w:0) @@ -125,12 +123,12 @@ impl WeightInfo for () { /// The range of component `n` is `[2, 512]`. fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `378` + // Measured: `325` // Estimated: `1517` - // Minimum execution time: 15_459_000 picoseconds. - Weight::from_parts(21_963_366, 1517) - // Standard Error: 1_528 - .saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into())) + // Minimum execution time: 10_687_000 picoseconds. 
+ Weight::from_parts(14_851_626, 1517) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index b8e952dfbd66..05af974e89a7 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -13,13 +13,13 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } sp-consensus-beefy = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 7ae41c609180..2b75c4107414 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -366,5 +366,5 @@ pub fn start_session(session_index: SessionIndex) { pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); + assert_eq!(Staking::current_era(), Some(era_index)); } diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 89645d21f6ba..d75237205cac 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -313,7 +313,7 @@ fn report_equivocation_current_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); 
ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { - assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); + assert_eq!(Staking::current_era(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(1); @@ -906,7 +906,7 @@ fn report_fork_voting_invalid_context() { let mut era = 1; let block_num = ext.execute_with(|| { - assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); + assert_eq!(Staking::current_era(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(era); diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index fabeb9a03195..9ea350a1d290 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -17,14 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-support = { workspace = true } -frame-support-procedural = { workspace = true } -frame-system = { workspace = true } linregress = { optional = true, workspace = true } log = { workspace = true } paste = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } +frame-support = { workspace = true } +frame-support-procedural = { workspace = true } +frame-system = { workspace = true } sp-api = { workspace = true } sp-application-crypto = { workspace = true } sp-core = { workspace = true } @@ -37,10 +37,7 @@ static_assertions = { workspace = true, default-features = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } rusty-fork = { workspace = true } -sc-client-db = { workspace = true } -sp-externalities = { workspace = true } sp-keystore = { workspace = true, default-features = true } -sp-state-machine = { workspace = true } [features] default = ["std"] @@ -56,17 +53,14 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", - "sp-externalities/std", "sp-io/std", 
"sp-keystore/std", "sp-runtime-interface/std", "sp-runtime/std", - "sp-state-machine/std", "sp-storage/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index 47c6d6e5e4bc..ce89dceed3c3 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/benchmarking/src/tests_instance.rs b/substrate/frame/benchmarking/src/tests_instance.rs index 428f29e2bc16..ecffbd1a018f 100644 --- a/substrate/frame/benchmarking/src/tests_instance.rs +++ b/substrate/frame/benchmarking/src/tests_instance.rs @@ -61,7 +61,6 @@ mod pallet_test { #[pallet::weight({0})] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; - assert!(n >= T::LowerBound::get()); Value::::put(n); Ok(()) } @@ -82,7 +81,6 @@ frame_support::construct_runtime!( { System: frame_system, TestPallet: pallet_test, - TestPallet2: pallet_test::, } ); @@ -119,12 +117,6 @@ impl pallet_test::Config for Test { type UpperBound = ConstU32<100>; } -impl pallet_test::Config for Test { - type RuntimeEvent = RuntimeEvent; - type LowerBound = ConstU32<50>; - type UpperBound = ConstU32<100>; -} - impl pallet_test::OtherConfig for Test { type OtherEvent = RuntimeEvent; } @@ -138,7 +130,6 @@ mod benchmarks { use crate::account; use frame_support::ensure; use frame_system::RawOrigin; - use sp_core::Get; // 
Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; @@ -152,7 +143,7 @@ mod benchmarks { } set_value { - let b in ( >::LowerBound::get() ) .. ( >::UpperBound::get() ); + let b in 1 .. 1000; let caller = account::("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) verify { @@ -182,53 +173,3 @@ mod benchmarks { ) } } - -#[test] -fn ensure_correct_instance_is_selected() { - use crate::utils::Benchmarking; - - crate::define_benchmarks!( - [pallet_test, TestPallet] - [pallet_test, TestPallet2] - ); - - let whitelist = vec![]; - - let mut batches = Vec::::new(); - let config = crate::BenchmarkConfig { - pallet: "pallet_test".bytes().collect::>(), - // We only want that this `instance` is used. - // Otherwise the wrong components are used. - instance: "TestPallet".bytes().collect::>(), - benchmark: "set_value".bytes().collect::>(), - selected_components: TestPallet::benchmarks(false) - .into_iter() - .find_map(|b| { - if b.name == "set_value".as_bytes() { - Some(b.components.into_iter().map(|c| (c.0, c.1)).collect::>()) - } else { - None - } - }) - .unwrap(), - verify: false, - internal_repeats: 1, - }; - let params = (&config, &whitelist); - - let state = sc_client_db::BenchmarkingState::::new( - Default::default(), - None, - false, - false, - ) - .unwrap(); - - let mut overlay = Default::default(); - let mut ext = sp_state_machine::Ext::new(&mut overlay, &state, None); - sp_externalities::set_and_run_with_externalities(&mut ext, || { - add_benchmarks!(params, batches); - Ok::<_, crate::BenchmarkError>(()) - }) - .unwrap(); -} diff --git a/substrate/frame/benchmarking/src/utils.rs b/substrate/frame/benchmarking/src/utils.rs index 3a10e43d83b8..fb55cee99e81 100644 --- a/substrate/frame/benchmarking/src/utils.rs +++ b/substrate/frame/benchmarking/src/utils.rs @@ -200,8 +200,6 @@ impl From for BenchmarkError { pub struct BenchmarkConfig { /// The encoded name of the pallet to benchmark. 
pub pallet: Vec, - /// The encoded name of the pallet instance to benchmark. - pub instance: Vec, /// The encoded name of the benchmark/extrinsic to run. pub benchmark: Vec, /// The selected component values to use when running the benchmark. @@ -231,7 +229,6 @@ pub struct BenchmarkMetadata { sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. - #[api_version(2)] pub trait Benchmark { /// Get the benchmark metadata available for this runtime. /// diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 64f93b22cf1b..e73ed1f4382f 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1821,13 +1821,12 @@ macro_rules! add_benchmark { let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, - instance, benchmark, selected_components, verify, internal_repeats, } = config; - if &pallet[..] == &name_string[..] && &instance[..] == &instance_string[..] { + if &pallet[..] == &name_string[..] { let benchmark_result = <$location>::run_benchmark( &benchmark[..], &selected_components[..], diff --git a/substrate/frame/benchmarking/src/weights.rs b/substrate/frame/benchmarking/src/weights.rs index e3c4df0bf72a..ea9ef6eb5c6d 100644 --- a/substrate/frame/benchmarking/src/weights.rs +++ b/substrate/frame/benchmarking/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `frame_benchmarking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,49 +67,49 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(207_660, 0) + // Minimum execution time: 132_000 picoseconds. + Weight::from_parts(160_546, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 162_000 picoseconds. - Weight::from_parts(211_047, 0) + // Minimum execution time: 133_000 picoseconds. + Weight::from_parts(171_395, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(221_118, 0) + // Minimum execution time: 126_000 picoseconds. + Weight::from_parts(166_417, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 160_000 picoseconds. - Weight::from_parts(211_723, 0) + // Minimum execution time: 131_000 picoseconds. + Weight::from_parts(166_348, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_426_716_000 picoseconds. - Weight::from_parts(24_453_973_000, 0) + // Minimum execution time: 26_583_601_000 picoseconds. + Weight::from_parts(26_795_212_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 210_000 picoseconds. 
- Weight::from_parts(3_898_542, 0) - // Standard Error: 9_136 - .saturating_add(Weight::from_parts(40_574_115, 0).saturating_mul(i.into())) + // Minimum execution time: 158_000 picoseconds. + Weight::from_parts(5_277_102, 0) + // Standard Error: 6_279 + .saturating_add(Weight::from_parts(40_610_511, 0).saturating_mul(i.into())) } } @@ -120,48 +120,48 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(207_660, 0) + // Minimum execution time: 132_000 picoseconds. + Weight::from_parts(160_546, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 162_000 picoseconds. - Weight::from_parts(211_047, 0) + // Minimum execution time: 133_000 picoseconds. + Weight::from_parts(171_395, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(221_118, 0) + // Minimum execution time: 126_000 picoseconds. + Weight::from_parts(166_417, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 160_000 picoseconds. - Weight::from_parts(211_723, 0) + // Minimum execution time: 131_000 picoseconds. + Weight::from_parts(166_348, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_426_716_000 picoseconds. - Weight::from_parts(24_453_973_000, 0) + // Minimum execution time: 26_583_601_000 picoseconds. + Weight::from_parts(26_795_212_000, 0) } /// The range of component `i` is `[0, 100]`. 
fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 210_000 picoseconds. - Weight::from_parts(3_898_542, 0) - // Standard Error: 9_136 - .saturating_add(Weight::from_parts(40_574_115, 0).saturating_mul(i.into())) + // Minimum execution time: 158_000 picoseconds. + Weight::from_parts(5_277_102, 0) + // Standard Error: 6_279 + .saturating_add(Weight::from_parts(40_610_511, 0).saturating_mul(i.into())) } } diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 926af60d1acb..a272153fed07 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-treasury = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index b5155909e3cd..8ad85d5420ed 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -15,7 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Bounties pallet benchmarking. +//! bounties pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] use super::*; @@ -23,7 +25,7 @@ use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; -use frame_system::{pallet_prelude::BlockNumberFor as SystemBlockNumberFor, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::{BlockNumberProvider, Bounded}; use crate::Pallet as Bounties; @@ -31,20 +33,10 @@ use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; -fn set_block_number, I: 'static>(n: BlockNumberFor) { +fn set_block_number, I: 'static>(n: BlockNumberFor) { >::BlockNumberProvider::set_block_number(n); } -fn minimum_balance, I: 'static>() -> BalanceOf { - let minimum_balance = T::Currency::minimum_balance(); - - if minimum_balance.is_zero() { - 1u32.into() - } else { - minimum_balance - } -} - // Create bounties that are approved for use in `on_initialize`. fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), BenchmarkError> { for i in 0..n { @@ -70,10 +62,12 @@ fn setup_bounty, I: 'static>( let fee = value / 2u32.into(); let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit + minimum_balance::()); + let _ = T::Currency::make_free_balance_be(&caller, deposit + T::Currency::minimum_balance()); let curator = account("curator", u, SEED); - let _ = - T::Currency::make_free_balance_be(&curator, fee / 2u32.into() + minimum_balance::()); + let _ = T::Currency::make_free_balance_be( + &curator, + fee / 2u32.into() + T::Currency::minimum_balance(), + ); let reason = vec![0; d as usize]; (caller, curator, fee, value, reason) } @@ -97,7 +91,7 @@ fn create_bounty, I: 'static>( fn setup_pot_account, I: 'static>() { let pot_account = Bounties::::account_id(); - let value = minimum_balance::().saturating_mul(1_000_000_000u32.into()); + let value = 
T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); let _ = T::Currency::make_free_balance_be(&pot_account, value); } @@ -138,7 +132,7 @@ benchmarks_instance_pallet! { Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - Treasury::::on_initialize(SystemBlockNumberFor::::zero()); + Treasury::::on_initialize(BlockNumberFor::::zero()); }: _(approve_origin, bounty_id, curator_lookup, fee) verify { assert_last_event::( diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index d9accc5061cf..3ed408a19120 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -84,7 +84,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "runtime-benchmarks")] mod benchmarking; pub mod migrations; mod tests; @@ -106,9 +105,7 @@ use sp_runtime::{ use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::{ - ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, -}; +use frame_system::pallet_prelude::*; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -123,9 +120,6 @@ pub type BountyIndex = u32; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -type BlockNumberFor = - <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; - /// A bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -219,11 +213,11 @@ pub mod pallet { /// The delay period for which a bounty beneficiary need to wait before claim the payout. #[pallet::constant] - type BountyDepositPayoutDelay: Get>; + type BountyDepositPayoutDelay: Get>; /// Bounty duration in blocks. 
#[pallet::constant] - type BountyUpdatePeriod: Get>; + type BountyUpdatePeriod: Get>; /// The curator deposit is calculated as a percentage of the curator fee. /// @@ -332,7 +326,7 @@ pub mod pallet { _, Twox64Concat, BountyIndex, - Bounty, BlockNumberFor>, + Bounty, BlockNumberFor>, >; /// The description of each bounty. @@ -882,9 +876,9 @@ pub mod pallet { } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { #[cfg(feature = "try-runtime")] - fn try_state(_n: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() } } @@ -934,7 +928,7 @@ impl, I: 'static> Pallet { /// Get the block number used in the treasury pallet. /// /// It may be configured to use the relay chain block number on a parachain. - pub fn treasury_block_number() -> BlockNumberFor { + pub fn treasury_block_number() -> BlockNumberFor { >::BlockNumberProvider::current_block_number() } diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index 1df6d3143edb..7230fa4a6a77 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_bounties -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/bounties/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bounties +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/bounties/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -79,12 +77,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342` + // Measured: `343` // Estimated: `3593` - // Minimum execution time: 27_112_000 picoseconds. - Weight::from_parts(28_480_264, 3593) - // Standard Error: 167 - .saturating_add(Weight::from_parts(755, 0).saturating_mul(d.into())) + // Minimum execution time: 31_284_000 picoseconds. + Weight::from_parts(33_484_932, 3593) + // Standard Error: 299 + .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -96,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `434` // Estimated: `3642` - // Minimum execution time: 14_400_000 picoseconds. - Weight::from_parts(14_955_000, 3642) + // Minimum execution time: 17_656_000 picoseconds. 
+ Weight::from_parts(18_501_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -107,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `454` // Estimated: `3642` - // Minimum execution time: 17_380_000 picoseconds. - Weight::from_parts(18_234_000, 3642) + // Minimum execution time: 15_416_000 picoseconds. + Weight::from_parts(16_463_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,10 +116,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty_with_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 19_733_000 picoseconds. - Weight::from_parts(21_051_000, 3642) + // Minimum execution time: 21_802_000 picoseconds. + Weight::from_parts(22_884_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -133,8 +131,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `630` // Estimated: `3642` - // Minimum execution time: 44_620_000 picoseconds. - Weight::from_parts(45_529_000, 3642) + // Minimum execution time: 45_843_000 picoseconds. + Weight::from_parts(47_558_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -146,8 +144,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `626` // Estimated: `3642` - // Minimum execution time: 34_825_000 picoseconds. - Weight::from_parts(36_092_000, 3642) + // Minimum execution time: 35_720_000 picoseconds. 
+ Weight::from_parts(37_034_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -159,8 +157,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `638` // Estimated: `3642` - // Minimum execution time: 22_985_000 picoseconds. - Weight::from_parts(23_657_000, 3642) + // Minimum execution time: 23_318_000 picoseconds. + Weight::from_parts(24_491_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -172,18 +170,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `1036` + // Measured: `1069` // Estimated: `8799` - // Minimum execution time: 119_682_000 picoseconds. - Weight::from_parts(122_515_000, 8799) + // Minimum execution time: 127_643_000 picoseconds. 
+ Weight::from_parts(130_844_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -195,31 +189,29 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `682` + // Measured: `683` // Estimated: `3642` - // Minimum execution time: 47_430_000 picoseconds. - Weight::from_parts(48_592_000, 3642) + // Minimum execution time: 49_963_000 picoseconds. + Weight::from_parts(51_484_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `952` + // Measured: `985` // 
Estimated: `6196` - // Minimum execution time: 85_520_000 picoseconds. - Weight::from_parts(87_644_000, 6196) + // Minimum execution time: 89_310_000 picoseconds. + Weight::from_parts(92_223_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -227,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `490` // Estimated: `3642` - // Minimum execution time: 18_145_000 picoseconds. - Weight::from_parts(18_727_000, 3642) + // Minimum execution time: 16_630_000 picoseconds. + Weight::from_parts(17_171_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,12 +233,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `71 + b * (298 ±0)` + // Measured: `205 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 3_649_000 picoseconds. - Weight::from_parts(3_727_000, 1887) - // Standard Error: 8_881 - .saturating_add(Weight::from_parts(35_199_034, 0).saturating_mul(b.into())) + // Minimum execution time: 4_334_000 picoseconds. + Weight::from_parts(1_256_424, 1887) + // Standard Error: 42_406 + .saturating_add(Weight::from_parts(36_979_844, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -268,12 +260,12 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 300]`. 
fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `342` + // Measured: `343` // Estimated: `3593` - // Minimum execution time: 27_112_000 picoseconds. - Weight::from_parts(28_480_264, 3593) - // Standard Error: 167 - .saturating_add(Weight::from_parts(755, 0).saturating_mul(d.into())) + // Minimum execution time: 31_284_000 picoseconds. + Weight::from_parts(33_484_932, 3593) + // Standard Error: 299 + .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -285,8 +277,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `434` // Estimated: `3642` - // Minimum execution time: 14_400_000 picoseconds. - Weight::from_parts(14_955_000, 3642) + // Minimum execution time: 17_656_000 picoseconds. + Weight::from_parts(18_501_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -296,8 +288,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `454` // Estimated: `3642` - // Minimum execution time: 17_380_000 picoseconds. - Weight::from_parts(18_234_000, 3642) + // Minimum execution time: 15_416_000 picoseconds. + Weight::from_parts(16_463_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,10 +299,10 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty_with_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 19_733_000 picoseconds. - Weight::from_parts(21_051_000, 3642) + // Minimum execution time: 21_802_000 picoseconds. 
+ Weight::from_parts(22_884_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -322,8 +314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `630` // Estimated: `3642` - // Minimum execution time: 44_620_000 picoseconds. - Weight::from_parts(45_529_000, 3642) + // Minimum execution time: 45_843_000 picoseconds. + Weight::from_parts(47_558_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -335,8 +327,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `626` // Estimated: `3642` - // Minimum execution time: 34_825_000 picoseconds. - Weight::from_parts(36_092_000, 3642) + // Minimum execution time: 35_720_000 picoseconds. + Weight::from_parts(37_034_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -348,8 +340,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `638` // Estimated: `3642` - // Minimum execution time: 22_985_000 picoseconds. - Weight::from_parts(23_657_000, 3642) + // Minimum execution time: 23_318_000 picoseconds. 
+ Weight::from_parts(24_491_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -361,18 +353,14 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `1036` + // Measured: `1069` // Estimated: `8799` - // Minimum execution time: 119_682_000 picoseconds. - Weight::from_parts(122_515_000, 8799) + // Minimum execution time: 127_643_000 picoseconds. + Weight::from_parts(130_844_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -384,31 +372,29 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `682` + // Measured: `683` // Estimated: `3642` - // Minimum execution time: 47_430_000 picoseconds. - Weight::from_parts(48_592_000, 3642) + // Minimum execution time: 49_963_000 picoseconds. 
+ Weight::from_parts(51_484_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `952` + // Measured: `985` // Estimated: `6196` - // Minimum execution time: 85_520_000 picoseconds. - Weight::from_parts(87_644_000, 6196) + // Minimum execution time: 89_310_000 picoseconds. + Weight::from_parts(92_223_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -416,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `490` // Estimated: `3642` - // Minimum execution time: 18_145_000 picoseconds. - Weight::from_parts(18_727_000, 3642) + // Minimum execution time: 16_630_000 picoseconds. 
+ Weight::from_parts(17_171_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -430,12 +416,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `71 + b * (298 ±0)` + // Measured: `205 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 3_649_000 picoseconds. - Weight::from_parts(3_727_000, 1887) - // Standard Error: 8_881 - .saturating_add(Weight::from_parts(35_199_034, 0).saturating_mul(b.into())) + // Minimum execution time: 4_334_000 picoseconds. + Weight::from_parts(1_256_424, 1887) + // Standard Error: 42_406 + .saturating_add(Weight::from_parts(36_979_844, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index a4cfe49d3b35..aead49013ef0 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -15,22 +15,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bitvec = { workspace = true } -codec = { features = ["derive"], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } log = { workspace = true } +codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +bitvec = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -pretty_assertions = { workspace = true } 
sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 516518740f7d..595bf564f7e1 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -30,11 +30,11 @@ use frame_support::{ }, }; use frame_system::{Pallet as System, RawOrigin}; -use sp_arithmetic::Perbill; +use sp_arithmetic::{traits::Zero, Perbill}; use sp_core::Get; use sp_runtime::{ traits::{BlockNumberProvider, MaybeConvert}, - Saturating, + SaturatedConversion, Saturating, }; const SEED: u32 = 0; @@ -217,11 +217,9 @@ mod benches { _(origin as T::RuntimeOrigin, initial_price, extra_cores.try_into().unwrap()); assert!(SaleInfo::::get().is_some()); - let sale_start = RCBlockNumberProviderOf::::current_block_number() + - config.interlude_length; assert_last_event::( Event::SaleInitialized { - sale_start, + sale_start: 2u32.into(), leadin_length: 1u32.into(), start_price: 1_000_000_000u32.into(), end_price: 10_000_000u32.into(), @@ -287,7 +285,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, 1001, Final) .map_err(|_| BenchmarkError::Weightless)?; @@ -316,7 +314,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -349,7 +347,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] 
_(RawOrigin::Signed(caller), region, 2); @@ -381,7 +379,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] _(RawOrigin::Signed(caller), region, 0x00000_fffff_fffff_00000.into()); @@ -417,7 +415,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] _(RawOrigin::Signed(caller), region, 1000, Provisional); @@ -452,7 +450,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -492,7 +490,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); T::Currency::set_balance(&recipient.clone(), T::Currency::minimum_balance()); @@ -548,7 +546,7 @@ mod benches { T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -582,7 +580,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; advance_to::( (T::TimeslicePeriod::get() * (region_len * 4).into()).try_into().ok().unwrap(), @@ -616,7 +614,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 
10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -786,97 +784,78 @@ mod benches { #[benchmark] fn rotate_sale(n: Linear<0, { MAX_CORE_COUNT.into() }>) -> Result<(), BenchmarkError> { + let core_count = n.try_into().unwrap(); let config = new_config_record::(); - Configuration::::put(config.clone()); - // Ensure there is one buyable core then use the rest to max out reservations and leases, if - // possible for worst case. - - // First allocate up to MaxReservedCores for reservations - let n_reservations = T::MaxReservedCores::get().min(n.saturating_sub(1)); - setup_reservations::(n_reservations); - // Then allocate remaining cores to leases, up to MaxLeasedCores - let n_leases = - T::MaxLeasedCores::get().min(n.saturating_sub(1).saturating_sub(n_reservations)); - setup_leases::(n_leases, 1, 20); - - // Start sales so we can test the auto-renewals. - Broker::::do_start_sales( - 10_000_000u32.into(), - n.saturating_sub(n_reservations) - .saturating_sub(n_leases) - .try_into() - .expect("Upper limit of n is a u16."), - ) - .expect("Configuration was initialized before; qed"); - - // Advance to the fixed price period. 
- advance_to::(2); + let now = frame_system::Pallet::::block_number(); + let end_price = 10_000_000u32.into(); + let commit_timeslice = Broker::::latest_timeslice_ready_to_commit(&config); + let sale = SaleInfoRecordOf:: { + sale_start: now, + leadin_length: Zero::zero(), + end_price, + sellout_price: None, + region_begin: commit_timeslice, + region_end: commit_timeslice.saturating_add(config.region_length), + first_core: 0, + ideal_cores_sold: 0, + cores_offered: 0, + cores_sold: 0, + }; + + let status = StatusRecord { + core_count, + private_pool_size: 0, + system_pool_size: 0, + last_committed_timeslice: commit_timeslice.saturating_sub(1), + last_timeslice: Broker::::current_timeslice(), + }; - // Assume max auto renewals for worst case. This is between 1 and the value of - // MaxAutoRenewals. - let n_renewable = T::MaxAutoRenewals::get() - .min(n.saturating_sub(n_leases).saturating_sub(n_reservations)); + // Assume Reservations to be filled for worst case + setup_reservations::(T::MaxReservedCores::get()); - let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); - let sale = SaleInfo::::get().expect("Sale has started."); + // Assume Leases to be filled for worst case + setup_leases::(T::MaxLeasedCores::get(), 1, 10); - (0..n_renewable.into()).try_for_each(|indx| -> Result<(), BenchmarkError> { + // Assume max auto renewals for worst case. 
+ (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(100u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; + Broker::::do_enable_auto_renew(caller, region.core, task, None)?; Ok(()) })?; - // Advance to the block before the rotate_sale in which the auto-renewals will take place. - let rotate_block = timeslice_period.saturating_mul(config.region_length) - 2; - advance_to::(rotate_block - 1); - - // Advance one block and manually tick so we can isolate the `rotate_sale` call. - System::::set_block_number(rotate_block.into()); - RCBlockNumberProviderOf::::set_block_number(rotate_block.into()); - let mut status = Status::::get().expect("Sale has started."); - let sale = SaleInfo::::get().expect("Sale has started."); - Broker::::process_core_count(&mut status); - Broker::::process_revenue(); - status.last_committed_timeslice = config.region_length; - #[block] { Broker::::rotate_sale(sale.clone(), &config, &status); } - // Get prices from the actual price adapter. 
- let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&sale)); - let new_sale = SaleInfo::::get().expect("Sale has started."); - let now = RCBlockNumberProviderOf::::current_block_number(); - let sale_start = config.interlude_length.saturating_add(rotate_block.into()); - - assert_has_event::( + assert!(SaleInfo::::get().is_some()); + assert_last_event::( Event::SaleInitialized { - sale_start, + sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: Broker::::sale_price(&new_sale, now), - end_price: new_prices.end_price, + start_price: 1_000_000_000u32.into(), + end_price: 10_000_000u32.into(), region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, cores_offered: n - .saturating_sub(n_reservations) - .saturating_sub(n_leases) + .saturating_sub(T::MaxReservedCores::get()) + .saturating_sub(T::MaxLeasedCores::get()) .try_into() .unwrap(), } @@ -884,18 +863,18 @@ mod benches { ); // Make sure all cores got renewed: - (0..n_renewable).for_each(|indx| { + (0..T::MaxAutoRenewals::get()).for_each(|indx| { let task = 1000 + indx; let who = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); assert_has_event::( Event::Renewed { who, - old_core: n_reservations as u16 + n_leases as u16 + indx as u16, - core: n_reservations as u16 + n_leases as u16 + indx as u16, - price: 10_000_000u32.into(), - begin: new_sale.region_begin, - duration: config.region_length, + old_core: 10 + indx as u16, // first ten cores are allocated to leases. + core: 10 + indx as u16, + price: 10u32.saturated_into(), + begin: 7, + duration: 3, workload: Schedule::truncate_from(vec![ScheduleItem { assignment: Task(task), mask: CoreMask::complete(), @@ -1016,47 +995,6 @@ mod benches { Ok(()) } - #[benchmark] - fn force_reserve() -> Result<(), BenchmarkError> { - Configuration::::put(new_config_record::()); - // Assume Reservations to be almost filled for worst case. 
- let reservation_count = T::MaxReservedCores::get().saturating_sub(1); - setup_reservations::(reservation_count); - - // Assume leases to be filled for worst case - setup_leases::(T::MaxLeasedCores::get(), 1, 10); - - let origin = - T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - // Sales must be started. - Broker::::do_start_sales(100u32.into(), CoreIndex::try_from(reservation_count).unwrap()) - .map_err(|_| BenchmarkError::Weightless)?; - - // Add a core. - let status = Status::::get().unwrap(); - Broker::::do_request_core_count(status.core_count + 1).unwrap(); - - advance_to::(T::TimeslicePeriod::get().try_into().ok().unwrap()); - let schedule = new_schedule(); - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, schedule.clone(), status.core_count); - - assert_eq!(Reservations::::decode_len().unwrap(), T::MaxReservedCores::get() as usize); - - let sale_info = SaleInfo::::get().unwrap(); - assert_eq!( - Workplan::::get((sale_info.region_begin, status.core_count)), - Some(schedule.clone()) - ); - // We called at timeslice 1, therefore 2 was already processed and 3 is the next possible - // assignment point. - assert_eq!(Workplan::::get((3, status.core_count)), Some(schedule)); - - Ok(()) - } - #[benchmark] fn swap_leases() -> Result<(), BenchmarkError> { let admin_origin = @@ -1076,62 +1014,56 @@ mod benches { #[benchmark] fn enable_auto_renew() -> Result<(), BenchmarkError> { - let _core_id = setup_and_start_sale::()?; + let _core = setup_and_start_sale::()?; advance_to::(2); - let sale = SaleInfo::::get().expect("Sale has already started."); // We assume max auto renewals for worst case. (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); - // Sovereign account needs sufficient funds to purchase and renew. 
T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(100u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; + Broker::::do_enable_auto_renew(caller, region.core, task, Some(7))?; Ok(()) })?; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(2001).expect("Failed to get sovereign account"); - // Sovereign account needs sufficient funds to purchase and renew. T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(100u32.into()), ); // The region for which we benchmark enable auto renew. - let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, 2001, Final) .map_err(|_| BenchmarkError::Weightless)?; // The most 'intensive' path is when we renew the core upon enabling auto-renewal. 
// Therefore, we advance to next bulk sale: - let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); - let config = Configuration::::get().expect("Already configured."); - advance_to::(config.region_length * timeslice_period); + advance_to::(6); #[extrinsic_call] _(RawOrigin::Signed(caller), region.core, 2001, None); assert_last_event::(Event::AutoRenewalEnabled { core: region.core, task: 2001 }.into()); // Make sure we indeed renewed: - let sale = SaleInfo::::get().expect("Sales have started."); assert!(PotentialRenewals::::get(PotentialRenewalId { core: region.core, - when: sale.region_end, + when: 10 // region end after renewal }) .is_some()); @@ -1140,41 +1072,37 @@ mod benches { #[benchmark] fn disable_auto_renew() -> Result<(), BenchmarkError> { - let core_id = setup_and_start_sale::()?; + let _core = setup_and_start_sale::()?; advance_to::(2); - let sale = SaleInfo::::get().expect("Sale has already started."); // We assume max auto renewals for worst case. 
- (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { + (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(100u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; + Broker::::do_enable_auto_renew(caller, region.core, task, Some(7))?; Ok(()) })?; - let task = 1000; - let caller: T::AccountId = - T::SovereignAccountOf::maybe_convert(task).expect("Failed to get sovereign account"); - + T::SovereignAccountOf::maybe_convert(1000).expect("Failed to get sovereign account"); #[extrinsic_call] - _(RawOrigin::Signed(caller), core_id, task); + _(RawOrigin::Signed(caller), _core, 1000); - assert_last_event::(Event::AutoRenewalDisabled { core: core_id, task }.into()); + assert_last_event::(Event::AutoRenewalDisabled { core: _core, task: 1000 }.into()); Ok(()) } @@ -1188,11 +1116,11 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(u32::MAX.into()), ); - let _region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); + let _region = Broker::::do_purchase(caller.clone(), (u32::MAX / 2).into()) + .map_err(|_| 
BenchmarkError::Weightless)?; let timeslice = Broker::::current_timeslice(); diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 489be12bdd15..5fbd957d7908 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -21,7 +21,7 @@ use frame_support::{ traits::{fungible::Mutate, tokens::Preservation::Expendable, DefensiveResult}, }; use sp_arithmetic::traits::{CheckedDiv, Saturating, Zero}; -use sp_runtime::traits::{BlockNumberProvider, Convert}; +use sp_runtime::traits::Convert; use CompletionStatus::{Complete, Partial}; impl Pallet { @@ -60,27 +60,6 @@ impl Pallet { Ok(()) } - pub(crate) fn do_force_reserve(workload: Schedule, core: CoreIndex) -> DispatchResult { - // Sales must have started, otherwise reserve is equivalent. - let sale = SaleInfo::::get().ok_or(Error::::NoSales)?; - - // Reserve - starts at second sale period boundary from now. - Self::do_reserve(workload.clone())?; - - // Add to workload - grants one region from the next sale boundary. - Workplan::::insert((sale.region_begin, core), &workload); - - // Assign now until the next sale boundary unless the next timeslice is already the sale - // boundary. 
- let status = Status::::get().ok_or(Error::::Uninitialized)?; - let timeslice = status.last_committed_timeslice.saturating_add(1); - if timeslice < sale.region_begin { - Workplan::::insert((timeslice, core), &workload); - } - - Ok(()) - } - pub(crate) fn do_set_lease(task: TaskId, until: Timeslice) -> DispatchResult { let mut r = Leases::::get(); ensure!(until > Self::current_timeslice(), Error::::AlreadyExpired); @@ -112,7 +91,7 @@ impl Pallet { last_committed_timeslice: commit_timeslice.saturating_sub(1), last_timeslice: Self::current_timeslice(), }; - let now = RCBlockNumberProviderOf::::current_block_number(); + let now = frame_system::Pallet::::block_number(); // Imaginary old sale for bootstrapping the first actual sale: let old_sale = SaleInfoRecord { sale_start: now, @@ -140,7 +119,7 @@ impl Pallet { let mut sale = SaleInfo::::get().ok_or(Error::::NoSales)?; Self::ensure_cores_for_sale(&status, &sale)?; - let now = RCBlockNumberProviderOf::::current_block_number(); + let now = frame_system::Pallet::::block_number(); ensure!(now > sale.sale_start, Error::::TooEarly); let price = Self::sale_price(&sale, now); ensure!(price_limit >= price, Error::::Overpriced); @@ -192,7 +171,7 @@ impl Pallet { let begin = sale.region_end; let price_cap = record.price + config.renewal_bump * record.price; - let now = RCBlockNumberProviderOf::::current_block_number(); + let now = frame_system::Pallet::::block_number(); let price = Self::sale_price(&sale, now).min(price_cap); log::debug!( "Renew with: sale price: {:?}, price cap: {:?}, old price: {:?}", @@ -590,7 +569,7 @@ impl Pallet { Self::ensure_cores_for_sale(&status, &sale)?; - let now = RCBlockNumberProviderOf::::current_block_number(); + let now = frame_system::Pallet::::block_number(); Ok(Self::sale_price(&sale, now)) } } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 01368fd6404d..10745544fadf 100644 --- a/substrate/frame/broker/src/lib.rs +++ 
b/substrate/frame/broker/src/lib.rs @@ -67,7 +67,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::traits::{Convert, ConvertBack, MaybeConvert}; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -305,11 +305,10 @@ pub mod pallet { }, /// A new sale has been initialized. SaleInitialized { - /// The relay block number at which the sale will/did start. - sale_start: RelayBlockNumberOf, - /// The length in relay chain blocks of the Leadin Period (where the price is - /// decreasing). - leadin_length: RelayBlockNumberOf, + /// The local block number at which the sale will/did start. + sale_start: BlockNumberFor, + /// The length in blocks of the Leadin Period (where the price is decreasing). + leadin_length: BlockNumberFor, /// The price of Bulk Coretime at the beginning of the Leadin Period. start_price: BalanceOf, /// The price of Bulk Coretime after the Leadin Period. @@ -585,9 +584,6 @@ pub mod pallet { /// Reserve a core for a workload. /// - /// The workload will be given a reservation, but two sale period boundaries must pass - /// before the core is actually assigned. - /// /// - `origin`: Must be Root or pass `AdminOrigin`. /// - `workload`: The workload which should be permanently placed on a core. #[pallet::call_index(1)] @@ -946,29 +942,6 @@ pub mod pallet { Ok(()) } - /// Reserve a core for a workload immediately. - /// - /// - `origin`: Must be Root or pass `AdminOrigin`. - /// - `workload`: The workload which should be permanently placed on a core starting - /// immediately. - /// - `core`: The core to which the assignment should be made until the reservation takes - /// effect. It is left to the caller to either add this new core or reassign any other - /// tasks to this existing core. 
- /// - /// This reserves the workload and then injects the workload into the Workplan for the next - /// two sale periods. This overwrites any existing assignments for this core at the start of - /// the next sale period. - #[pallet::call_index(23)] - pub fn force_reserve( - origin: OriginFor, - workload: Schedule, - core: CoreIndex, - ) -> DispatchResultWithPostInfo { - T::AdminOrigin::ensure_origin_or_root(origin)?; - Self::do_force_reserve(workload, core)?; - Ok(Pays::No.into()) - } - #[pallet::call_index(99)] #[pallet::weight(T::WeightInfo::swap_leases())] pub fn swap_leases(origin: OriginFor, id: TaskId, other: TaskId) -> DispatchResult { diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs index f19b1e19bdd1..c2a243d6f0e8 100644 --- a/substrate/frame/broker/src/migration.rs +++ b/substrate/frame/broker/src/migration.rs @@ -130,13 +130,7 @@ mod v2 { mod v3 { use super::*; - use codec::MaxEncodedLen; - use frame_support::{ - pallet_prelude::{OptionQuery, RuntimeDebug, TypeInfo}, - storage_alias, - }; use frame_system::Pallet as System; - use sp_arithmetic::Perbill; pub struct MigrateToV3Impl(PhantomData); @@ -162,244 +156,6 @@ mod v3 { Ok(()) } } - - #[storage_alias] - pub type Configuration = StorageValue, ConfigRecordOf, OptionQuery>; - pub type ConfigRecordOf = - ConfigRecord, RelayBlockNumberOf>; - - // types added here for v4 migration - #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] - pub struct ConfigRecord { - /// The number of Relay-chain blocks in advance which scheduling should be fixed and the - /// `Coretime::assign` API used to inform the Relay-chain. - pub advance_notice: RelayBlockNumber, - /// The length in blocks of the Interlude Period for forthcoming sales. - pub interlude_length: BlockNumber, - /// The length in blocks of the Leadin Period for forthcoming sales. 
- pub leadin_length: BlockNumber, - /// The length in timeslices of Regions which are up for sale in forthcoming sales. - pub region_length: Timeslice, - /// The proportion of cores available for sale which should be sold in order for the price - /// to remain the same in the next sale. - pub ideal_bulk_proportion: Perbill, - /// An artificial limit to the number of cores which are allowed to be sold. If `Some` then - /// no more cores will be sold than this. - pub limit_cores_offered: Option, - /// The amount by which the renewal price increases each sale period. - pub renewal_bump: Perbill, - /// The duration by which rewards for contributions to the InstaPool must be collected. - pub contribution_timeout: Timeslice, - } - - #[storage_alias] - pub type SaleInfo = StorageValue, SaleInfoRecordOf, OptionQuery>; - pub type SaleInfoRecordOf = - SaleInfoRecord, frame_system::pallet_prelude::BlockNumberFor>; - - /// The status of a Bulk Coretime Sale. - #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] - pub struct SaleInfoRecord { - /// The relay block number at which the sale will/did start. - pub sale_start: BlockNumber, - /// The length in relay chain blocks of the Leadin Period (where the price is decreasing). - pub leadin_length: BlockNumber, - /// The price of Bulk Coretime after the Leadin Period. - pub price: Balance, - /// The first timeslice of the Regions which are being sold in this sale. - pub region_begin: Timeslice, - /// The timeslice on which the Regions which are being sold in the sale terminate. (i.e. - /// One after the last timeslice which the Regions control.) - pub region_end: Timeslice, - /// The number of cores we want to sell, ideally. Selling this amount would result in no - /// change to the price for the next sale. - pub ideal_cores_sold: CoreIndex, - /// Number of cores which are/have been offered for sale. - pub cores_offered: CoreIndex, - /// The index of the first core which is for sale. 
Core of Regions which are sold have - /// incrementing indices from this. - pub first_core: CoreIndex, - /// The latest price at which Bulk Coretime was purchased until surpassing the ideal number - /// of cores were sold. - pub sellout_price: Option, - /// Number of cores which have been sold; never more than cores_offered. - pub cores_sold: CoreIndex, - } -} - -pub mod v4 { - use super::*; - - type BlockNumberFor = frame_system::pallet_prelude::BlockNumberFor; - - pub trait BlockToRelayHeightConversion { - /// Converts absolute value of parachain block number to relay chain block number - fn convert_block_number_to_relay_height( - block_number: BlockNumberFor, - ) -> RelayBlockNumberOf; - - /// Converts parachain block length into equivalent relay chain block length - fn convert_block_length_to_relay_length( - block_number: BlockNumberFor, - ) -> RelayBlockNumberOf; - } - - pub struct MigrateToV4Impl(PhantomData, PhantomData); - impl> UncheckedOnRuntimeUpgrade - for MigrateToV4Impl - { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let (interlude_length, configuration_leadin_length) = - if let Some(config_record) = v3::Configuration::::get() { - (config_record.interlude_length, config_record.leadin_length) - } else { - ((0 as u32).into(), (0 as u32).into()) - }; - - let updated_interlude_length: RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length(interlude_length); - let updated_leadin_length: RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length(configuration_leadin_length); - log::info!(target: LOG_TARGET, "Configuration Pre-Migration: Interlude Length {:?}->{:?} Leadin Length {:?}->{:?}", interlude_length, updated_interlude_length, configuration_leadin_length, updated_leadin_length); - - let (sale_start, sale_info_leadin_length) = - if let Some(sale_info_record) = v3::SaleInfo::::get() { - (sale_info_record.sale_start, sale_info_record.leadin_length) - } else { - 
((0 as u32).into(), (0 as u32).into()) - }; - - let updated_sale_start: RelayBlockNumberOf = - BlockConversion::convert_block_number_to_relay_height(sale_start); - let updated_sale_info_leadin_length: RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length(sale_info_leadin_length); - log::info!(target: LOG_TARGET, "SaleInfo Pre-Migration: Sale Start {:?}->{:?} Interlude Length {:?}->{:?}", sale_start, updated_sale_start, sale_info_leadin_length, updated_sale_info_leadin_length); - - Ok((interlude_length, configuration_leadin_length, sale_start, sale_info_leadin_length) - .encode()) - } - - fn on_runtime_upgrade() -> frame_support::weights::Weight { - let mut weight = T::DbWeight::get().reads(1); - - if let Some(config_record) = v3::Configuration::::take() { - log::info!(target: LOG_TARGET, "migrating Configuration record"); - - let updated_interlude_length: RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length( - config_record.interlude_length, - ); - let updated_leadin_length: RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length( - config_record.leadin_length, - ); - - let updated_config_record = ConfigRecord { - interlude_length: updated_interlude_length, - leadin_length: updated_leadin_length, - advance_notice: config_record.advance_notice, - region_length: config_record.region_length, - ideal_bulk_proportion: config_record.ideal_bulk_proportion, - limit_cores_offered: config_record.limit_cores_offered, - renewal_bump: config_record.renewal_bump, - contribution_timeout: config_record.contribution_timeout, - }; - Configuration::::put(updated_config_record); - } - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - if let Some(sale_info) = v3::SaleInfo::::take() { - log::info!(target: LOG_TARGET, "migrating SaleInfo record"); - - let updated_sale_start: RelayBlockNumberOf = - BlockConversion::convert_block_number_to_relay_height(sale_info.sale_start); - let updated_leadin_length: 
RelayBlockNumberOf = - BlockConversion::convert_block_length_to_relay_length(sale_info.leadin_length); - - let updated_sale_info = SaleInfoRecord { - sale_start: updated_sale_start, - leadin_length: updated_leadin_length, - end_price: sale_info.price, - region_begin: sale_info.region_begin, - region_end: sale_info.region_end, - ideal_cores_sold: sale_info.ideal_cores_sold, - cores_offered: sale_info.cores_offered, - first_core: sale_info.first_core, - sellout_price: sale_info.sellout_price, - cores_sold: sale_info.cores_sold, - }; - SaleInfo::::put(updated_sale_info); - } - - weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - let ( - old_interlude_length, - old_configuration_leadin_length, - old_sale_start, - old_sale_info_leadin_length, - ): (BlockNumberFor, BlockNumberFor, BlockNumberFor, BlockNumberFor) = - Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); - - if let Some(config_record) = Configuration::::get() { - ensure!( - Self::verify_updated_block_length( - old_configuration_leadin_length, - config_record.leadin_length - ), - "must migrate configuration leadin_length" - ); - - ensure!( - Self::verify_updated_block_length( - old_interlude_length, - config_record.interlude_length - ), - "must migrate configuration interlude_length" - ); - } - - if let Some(sale_info) = SaleInfo::::get() { - ensure!( - Self::verify_updated_block_time(old_sale_start, sale_info.sale_start), - "must migrate sale info sale_start" - ); - - ensure!( - Self::verify_updated_block_length( - old_sale_info_leadin_length, - sale_info.leadin_length - ), - "must migrate sale info leadin_length" - ); - } - - Ok(()) - } - } - - #[cfg(feature = "try-runtime")] - impl> - MigrateToV4Impl - { - fn verify_updated_block_time( - old_value: BlockNumberFor, - new_value: RelayBlockNumberOf, - ) -> bool { - 
BlockConversion::convert_block_number_to_relay_height(old_value) == new_value - } - - fn verify_updated_block_length( - old_value: BlockNumberFor, - new_value: RelayBlockNumberOf, - ) -> bool { - BlockConversion::convert_block_length_to_relay_length(old_value) == new_value - } - } } /// Migrate the pallet storage from `0` to `1`. @@ -426,11 +182,3 @@ pub type MigrateV2ToV3 = frame_support::migrations::VersionedMigration< Pallet, ::DbWeight, >; - -pub type MigrateV3ToV4 = frame_support::migrations::VersionedMigration< - 3, - 4, - v4::MigrateToV4Impl, - Pallet, - ::DbWeight, ->; diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index a130a2050d9a..f3fd5234e4ca 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -1837,306 +1837,3 @@ fn start_sales_sets_correct_core_count() { System::assert_has_event(Event::::CoreCountRequested { core_count: 9 }.into()); }) } - -// Reservations currently need two sale period boundaries to pass before coming into effect. -#[test] -fn reserve_works() { - TestExt::new().execute_with(|| { - assert_ok!(Broker::do_start_sales(100, 0)); - // Advance forward from start_sales, but not into the first sale. - advance_to(1); - - let system_workload = Schedule::truncate_from(vec![ScheduleItem { - mask: CoreMask::complete(), - assignment: Task(1004), - }]); - - // This shouldn't work, as the reservation will never be assigned a core unless one is - // available. - // assert_noop!(Broker::do_reserve(system_workload.clone()), Error::::Unavailable); - - // Add another core and create the reservation. - let status = Status::::get().unwrap(); - assert_ok!(Broker::request_core_count(RuntimeOrigin::root(), status.core_count + 1)); - assert_ok!(Broker::reserve(RuntimeOrigin::root(), system_workload.clone())); - - // This is added to reservations. 
- System::assert_last_event( - Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), - ); - assert_eq!(Reservations::::get(), vec![system_workload.clone()]); - - // But not yet in workplan for any of the next few regions. - for i in 0..20 { - assert_eq!(Workplan::::get((i, 0)), None); - } - // And it hasn't been assigned a core. - assert_eq!(CoretimeTrace::get(), vec![]); - - // Go to next sale. Rotate sale puts it in the workplan. - advance_sale_period(); - assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); - // But it still hasn't been assigned a core. - assert_eq!(CoretimeTrace::get(), vec![]); - - // Go to the second sale after reserving. - advance_sale_period(); - // Core is assigned at block 14 (timeslice 7) after being reserved all the way back at - // timeslice 1! Since the mock periods are 3 timeslices long, this means that reservations - // made in period 0 will only come into effect in period 2. - assert_eq!( - CoretimeTrace::get(), - vec![( - 12, - AssignCore { - core: 0, - begin: 14, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - )] - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 14, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - - // And it's in the workplan for the next period. - assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); - }); -} - -// We can use a hack to accelerate this by injecting it into the workplan. -#[test] -fn can_reserve_workloads_quickly() { - TestExt::new().execute_with(|| { - // Start sales. - assert_ok!(Broker::do_start_sales(100, 0)); - advance_to(2); - - let system_workload = Schedule::truncate_from(vec![ScheduleItem { - mask: CoreMask::complete(), - assignment: Task(1004), - }]); - - // This shouldn't work, as the reservation will never be assigned a core unless one is - // available. 
- // assert_noop!(Broker::do_reserve(system_workload.clone()), Error::::Unavailable); - - // Add another core and create the reservation. - let core_count = Status::::get().unwrap().core_count; - assert_ok!(Broker::request_core_count(RuntimeOrigin::root(), core_count + 1)); - assert_ok!(Broker::reserve(RuntimeOrigin::root(), system_workload.clone())); - - // These are the additional steps to onboard this immediately. - let core_index = core_count; - // In a real network this would call the relay chain - // `assigner_coretime::assign_core` extrinsic directly. - ::assign_core( - core_index, - 2, - vec![(Task(1004), 57600)], - None, - ); - // Inject into the workplan to ensure it's scheduled in the next rotate_sale. - Workplan::::insert((4, core_index), system_workload.clone()); - - // Reservation is added for the workload. - System::assert_has_event( - Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), - ); - System::assert_has_event(Event::CoreCountRequested { core_count: 1 }.into()); - - // It is also in the workplan for the next region. - assert_eq!(Workplan::::get((4, 0)), Some(system_workload.clone())); - - // Go to next sale. Rotate sale puts it in the workplan. - advance_sale_period(); - assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); - - // Go to the second sale after reserving. - advance_sale_period(); - - // Check the trace to ensure it has a core in every region. 
- assert_eq!( - CoretimeTrace::get(), - vec![ - ( - 2, - AssignCore { - core: 0, - begin: 2, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ), - ( - 6, - AssignCore { - core: 0, - begin: 8, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ), - ( - 12, - AssignCore { - core: 0, - begin: 14, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ) - ] - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 8, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 14, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 14, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - - // And it's in the workplan for the next period. - assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); - }); -} - -// Add an extrinsic to do it properly. -#[test] -fn force_reserve_works() { - TestExt::new().execute_with(|| { - let system_workload = Schedule::truncate_from(vec![ScheduleItem { - mask: CoreMask::complete(), - assignment: Task(1004), - }]); - - // Not intended to work before sales are started. - assert_noop!( - Broker::force_reserve(RuntimeOrigin::root(), system_workload.clone(), 0), - Error::::NoSales - ); - - // Start sales. - assert_ok!(Broker::do_start_sales(100, 0)); - advance_to(1); - - // Add a new core. With the mock this is instant, with current relay implementation it - // takes two sessions to come into effect. - assert_ok!(Broker::do_request_core_count(1)); - - // Force reserve should now work. - assert_ok!(Broker::force_reserve(RuntimeOrigin::root(), system_workload.clone(), 0)); - - // Reservation is added for the workload. 
- System::assert_has_event( - Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), - ); - System::assert_has_event(Event::CoreCountRequested { core_count: 1 }.into()); - assert_eq!(Reservations::::get(), vec![system_workload.clone()]); - - // Advance to where that timeslice will be committed. - advance_to(3); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 4, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - - // It is also in the workplan for the next region. - assert_eq!(Workplan::::get((4, 0)), Some(system_workload.clone())); - - // Go to next sale. Rotate sale puts it in the workplan. - advance_sale_period(); - assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); - - // Go to the second sale after reserving. - advance_sale_period(); - - // Check the trace to ensure it has a core in every region. - assert_eq!( - CoretimeTrace::get(), - vec![ - ( - 2, - AssignCore { - core: 0, - begin: 4, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ), - ( - 6, - AssignCore { - core: 0, - begin: 8, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ), - ( - 12, - AssignCore { - core: 0, - begin: 14, - assignment: vec![(Task(1004), 57600)], - end_hint: None - } - ) - ] - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 8, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 14, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - System::assert_has_event( - Event::CoreAssigned { - core: 0, - when: 14, - assignment: vec![(CoreAssignment::Task(1004), 57600)], - } - .into(), - ); - - // And it's in the workplan for the next period. 
- assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); - }); -} diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index e0b4932f11e2..8dbd5df57166 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -19,7 +19,7 @@ use super::*; use alloc::{vec, vec::Vec}; use frame_support::{pallet_prelude::*, traits::defensive_prelude::*, weights::WeightMeter}; use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero}; -use sp_runtime::traits::{BlockNumberProvider, ConvertBack, MaybeConvert}; +use sp_runtime::traits::{ConvertBack, MaybeConvert}; use CompletionStatus::Complete; impl Pallet { @@ -158,7 +158,7 @@ impl Pallet { config: &ConfigRecordOf, status: &StatusRecord, ) -> Option<()> { - let now = RCBlockNumberProviderOf::::current_block_number(); + let now = frame_system::Pallet::::block_number(); let pool_item = ScheduleItem { assignment: CoreAssignment::Pool, mask: CoreMask::complete() }; diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index f970b310a3cb..10e6756bc90e 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -21,7 +21,7 @@ use crate::{ }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::fungible::Inspect; -use frame_system::Config as SConfig; +use frame_system::{pallet_prelude::BlockNumberFor, Config as SConfig}; use scale_info::TypeInfo; use sp_arithmetic::Perbill; use sp_core::{ConstU32, RuntimeDebug}; @@ -208,11 +208,11 @@ pub struct PoolIoRecord { /// The status of a Bulk Coretime Sale. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct SaleInfoRecord { - /// The relay block number at which the sale will/did start. - pub sale_start: RelayBlockNumber, +pub struct SaleInfoRecord { + /// The local block number at which the sale will/did start. 
+ pub sale_start: BlockNumber, /// The length in blocks of the Leadin Period (where the price is decreasing). - pub leadin_length: RelayBlockNumber, + pub leadin_length: BlockNumber, /// The price of Bulk Coretime after the Leadin Period. pub end_price: Balance, /// The first timeslice of the Regions which are being sold in this sale. @@ -235,7 +235,7 @@ pub struct SaleInfoRecord { /// Number of cores which have been sold; never more than cores_offered. pub cores_sold: CoreIndex, } -pub type SaleInfoRecordOf = SaleInfoRecord, RelayBlockNumberOf>; +pub type SaleInfoRecordOf = SaleInfoRecord, BlockNumberFor>; /// Record for Polkadot Core reservations (generally tasked with the maintenance of System /// Chains). @@ -272,14 +272,14 @@ pub type OnDemandRevenueRecordOf = /// Configuration of this pallet. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct ConfigRecord { +pub struct ConfigRecord { /// The number of Relay-chain blocks in advance which scheduling should be fixed and the /// `Coretime::assign` API used to inform the Relay-chain. pub advance_notice: RelayBlockNumber, /// The length in blocks of the Interlude Period for forthcoming sales. - pub interlude_length: RelayBlockNumber, + pub interlude_length: BlockNumber, /// The length in blocks of the Leadin Period for forthcoming sales. - pub leadin_length: RelayBlockNumber, + pub leadin_length: BlockNumber, /// The length in timeslices of Regions which are up for sale in forthcoming sales. pub region_length: Timeslice, /// The proportion of cores available for sale which should be sold. @@ -296,11 +296,11 @@ pub struct ConfigRecord { /// The duration by which rewards for contributions to the InstaPool must be collected. 
pub contribution_timeout: Timeslice, } -pub type ConfigRecordOf = ConfigRecord>; +pub type ConfigRecordOf = ConfigRecord, RelayBlockNumberOf>; -impl ConfigRecord +impl ConfigRecord where - RelayBlockNumber: sp_arithmetic::traits::Zero, + BlockNumber: sp_arithmetic::traits::Zero, { /// Check the config for basic validity constraints. pub(crate) fn validate(&self) -> Result<(), ()> { diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 73f05d1e5ef4..e937e0cbbec5 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -24,6 +24,7 @@ use frame_support::{ OnUnbalanced, }, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_arithmetic::{ traits::{SaturatedConversion, Saturating}, FixedPointNumber, FixedU64, @@ -59,7 +60,7 @@ impl Pallet { T::PalletId::get().into_account_truncating() } - pub fn sale_price(sale: &SaleInfoRecordOf, now: RelayBlockNumberOf) -> BalanceOf { + pub fn sale_price(sale: &SaleInfoRecordOf, now: BlockNumberFor) -> BalanceOf { let num = now.saturating_sub(sale.sale_start).min(sale.leadin_length).saturated_into(); let through = FixedU64::from_rational(num, sale.leadin_length.saturated_into()); T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.end_price) diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index 87e588551661..2f25fddc2050 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `sergej-B650-AORUS-ELITE-AX`, CPU: `AMD Ryzen 9 7900X3D 12-Core Processor` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/release/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_broker +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_broker -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/broker/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -77,11 +79,10 @@ pub trait WeightInfo { fn notify_core_count() -> Weight; fn notify_revenue() -> Weight; fn do_tick_base() -> Weight; - fn force_reserve() -> Weight; fn swap_leases() -> Weight; + fn on_new_timeslice() -> Weight; fn enable_auto_renew() -> Weight; fn disable_auto_renew() -> Weight; - fn on_new_timeslice() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -93,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_660_000, 0) + // Minimum execution time: 1_593_000 picoseconds. + Weight::from_parts(1_703_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -103,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 23_090_000 picoseconds. - Weight::from_parts(23_664_000, 7496) + // Minimum execution time: 12_864_000 picoseconds. 
+ Weight::from_parts(13_174_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -114,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 21_782_000 picoseconds. - Weight::from_parts(22_708_000, 7496) + // Minimum execution time: 12_284_000 picoseconds. + Weight::from_parts(13_566_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -125,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_592_000, 1526) + // Minimum execution time: 6_743_000 picoseconds. + Weight::from_parts(7_094_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -151,10 +152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 31_757_000 picoseconds. - Weight::from_parts(57_977_268, 8499) - // Standard Error: 576 - .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) + // Minimum execution time: 21_120_000 picoseconds. 
+ Weight::from_parts(40_929_422, 8499) + // Standard Error: 471 + .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,15 +163,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `470` - // Estimated: `1542` - // Minimum execution time: 40_469_000 picoseconds. - Weight::from_parts(41_360_000, 1542) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 31_169_000 picoseconds. 
+ Weight::from_parts(32_271_000, 2136) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -181,15 +186,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 60_724_000 picoseconds. - Weight::from_parts(63_445_000, 4698) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 44_945_000 picoseconds. + Weight::from_parts(47_119_000, 4698) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -198,8 +207,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 23_734_000 picoseconds. - Weight::from_parts(25_080_000, 3551) + // Minimum execution time: 11_562_000 picoseconds. 
+ Weight::from_parts(11_943_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,8 +218,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 25_917_000 picoseconds. - Weight::from_parts(26_715_000, 3551) + // Minimum execution time: 13_075_000 picoseconds. + Weight::from_parts(13_616_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -220,8 +229,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 26_764_000 picoseconds. - Weight::from_parts(27_770_000, 3551) + // Minimum execution time: 13_695_000 picoseconds. + Weight::from_parts(14_658_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -237,8 +246,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 37_617_000 picoseconds. - Weight::from_parts(39_333_000, 4681) + // Minimum execution time: 22_623_000 picoseconds. + Weight::from_parts(23_233_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 43_168_000 picoseconds. - Weight::from_parts(44_741_000, 5996) + // Minimum execution time: 26_901_000 picoseconds. 
+ Weight::from_parts(27_472_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +281,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_317_000 picoseconds. - Weight::from_parts(76_792_860, 6196) - // Standard Error: 55_267 - .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) + // Minimum execution time: 51_778_000 picoseconds. + Weight::from_parts(53_726_731, 6196) + // Standard Error: 45_279 + .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +296,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 44_248_000 picoseconds. - Weight::from_parts(45_201_000, 3593) + // Minimum execution time: 31_790_000 picoseconds. + Weight::from_parts(32_601_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +309,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 39_853_000 picoseconds. - Weight::from_parts(44_136_000, 3551) + // Minimum execution time: 18_465_000 picoseconds. + Weight::from_parts(21_050_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +324,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 46_452_000 picoseconds. - Weight::from_parts(52_780_000, 3533) + // Minimum execution time: 23_825_000 picoseconds. 
+ Weight::from_parts(26_250_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +339,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1117` + // Measured: `1014` // Estimated: `3593` - // Minimum execution time: 64_905_000 picoseconds. - Weight::from_parts(72_914_000, 3593) + // Minimum execution time: 28_103_000 picoseconds. + Weight::from_parts(32_622_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -345,8 +354,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 38_831_000 picoseconds. - Weight::from_parts(41_420_000, 4698) + // Minimum execution time: 16_751_000 picoseconds. + Weight::from_parts(17_373_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -355,8 +364,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_595_000 picoseconds. - Weight::from_parts(4_964_606, 0) + // Minimum execution time: 2_705_000 picoseconds. + Weight::from_parts(2_991_768, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -365,58 +374,37 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 8_640_000 picoseconds. - Weight::from_parts(9_153_332, 1487) + // Minimum execution time: 4_598_000 picoseconds. 
+ Weight::from_parts(4_937_302, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Broker::RevenueInbox` (r:1 w:1) - /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `667` - // Estimated: `3593` - // Minimum execution time: 40_570_000 picoseconds. - Weight::from_parts(41_402_000, 3593) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `991` + // Estimated: `4456` + // Minimum execution time: 37_601_000 picoseconds. 
+ Weight::from_parts(38_262_000, 4456) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) - /// Storage: `Broker::Configuration` (r:1 w:0) - /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::PotentialRenewals` (r:10 w:20) - /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:10 w:10) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Broker::SaleInfo` (r:0 w:1) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:1000) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `8548` - // Estimated: `38070` - // Minimum execution time: 29_370_000 picoseconds. 
- Weight::from_parts(334_030_189, 38070) - // Standard Error: 6_912 - .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(26_u64)) - .saturating_add(T::DbWeight::get().writes(34_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + fn rotate_sale(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(0, 0) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -426,8 +414,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_005_000 picoseconds. - Weight::from_parts(9_392_000, 3493) + // Minimum execution time: 5_391_000 picoseconds. + Weight::from_parts(5_630_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -439,8 +427,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_043_000 picoseconds. - Weight::from_parts(20_089_000, 4681) + // Minimum execution time: 10_249_000 picoseconds. + Weight::from_parts(10_529_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -448,8 +436,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_000, 0) + // Minimum execution time: 120_000 picoseconds. 
+ Weight::from_parts(140_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -457,8 +445,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_248_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_402_000 picoseconds. + Weight::from_parts(1_513_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -467,8 +455,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_413_000 picoseconds. - Weight::from_parts(2_640_000, 0) + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -477,33 +465,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Broker::RevenueInbox` (r:1 w:0) - /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `441` - // Estimated: `1516` - // Minimum execution time: 17_083_000 picoseconds. - Weight::from_parts(18_077_000, 1516) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 8_897_000 picoseconds. 
+ Weight::from_parts(9_218_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Broker::SaleInfo` (r:1 w:0) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:1) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:2) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) - fn force_reserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `5253` - // Estimated: `7496` - // Minimum execution time: 28_363_000 picoseconds. - Weight::from_parts(29_243_000, 7496) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -511,11 +482,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 11_620_000 picoseconds. - Weight::from_parts(12_063_000, 1526) + // Minimum execution time: 4_678_000 picoseconds. + Weight::from_parts(4_920_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. 
+ Weight::from_parts(268_000, 0) + } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -526,37 +504,34 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `1121` + // Measured: `930` // Estimated: `4698` - // Minimum execution time: 85_270_000 picoseconds. - Weight::from_parts(90_457_000, 4698) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Minimum execution time: 51_597_000 picoseconds. + Weight::from_parts(52_609_000, 4698) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `578` + // Measured: `484` // Estimated: `1586` - // Minimum execution time: 22_479_000 picoseconds. 
- Weight::from_parts(23_687_000, 1586) + // Minimum execution time: 8_907_000 picoseconds. + Weight::from_parts(9_167_000, 1586) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 245_000 picoseconds. - Weight::from_parts(290_000, 0) - } } // For backwards compatibility and tests. @@ -567,8 +542,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_660_000, 0) + // Minimum execution time: 1_593_000 picoseconds. + Weight::from_parts(1_703_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -577,8 +552,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 23_090_000 picoseconds. - Weight::from_parts(23_664_000, 7496) + // Minimum execution time: 12_864_000 picoseconds. + Weight::from_parts(13_174_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -588,8 +563,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 21_782_000 picoseconds. - Weight::from_parts(22_708_000, 7496) + // Minimum execution time: 12_284_000 picoseconds. + Weight::from_parts(13_566_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -599,8 +574,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_592_000, 1526) + // Minimum execution time: 6_743_000 picoseconds. 
+ Weight::from_parts(7_094_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -625,10 +600,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 31_757_000 picoseconds. - Weight::from_parts(57_977_268, 8499) - // Standard Error: 576 - .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) + // Minimum execution time: 21_120_000 picoseconds. + Weight::from_parts(40_929_422, 8499) + // Standard Error: 471 + .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -636,15 +611,19 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `470` - // Estimated: `1542` - // Minimum execution time: 40_469_000 picoseconds. - Weight::from_parts(41_360_000, 1542) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 31_169_000 picoseconds. 
+ Weight::from_parts(32_271_000, 2136) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -655,15 +634,19 @@ impl WeightInfo for () { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 60_724_000 picoseconds. - Weight::from_parts(63_445_000, 4698) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 44_945_000 picoseconds. + Weight::from_parts(47_119_000, 4698) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -672,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 23_734_000 picoseconds. - Weight::from_parts(25_080_000, 3551) + // Minimum execution time: 11_562_000 picoseconds. 
+ Weight::from_parts(11_943_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -683,8 +666,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 25_917_000 picoseconds. - Weight::from_parts(26_715_000, 3551) + // Minimum execution time: 13_075_000 picoseconds. + Weight::from_parts(13_616_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -694,8 +677,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 26_764_000 picoseconds. - Weight::from_parts(27_770_000, 3551) + // Minimum execution time: 13_695_000 picoseconds. + Weight::from_parts(14_658_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -711,8 +694,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 37_617_000 picoseconds. - Weight::from_parts(39_333_000, 4681) + // Minimum execution time: 22_623_000 picoseconds. + Weight::from_parts(23_233_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -730,8 +713,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 43_168_000 picoseconds. - Weight::from_parts(44_741_000, 5996) + // Minimum execution time: 26_901_000 picoseconds. + Weight::from_parts(27_472_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -746,10 +729,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_317_000 picoseconds. 
- Weight::from_parts(76_792_860, 6196) - // Standard Error: 55_267 - .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) + // Minimum execution time: 51_778_000 picoseconds. + Weight::from_parts(53_726_731, 6196) + // Standard Error: 45_279 + .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -761,8 +744,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 44_248_000 picoseconds. - Weight::from_parts(45_201_000, 3593) + // Minimum execution time: 31_790_000 picoseconds. + Weight::from_parts(32_601_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -774,8 +757,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 39_853_000 picoseconds. - Weight::from_parts(44_136_000, 3551) + // Minimum execution time: 18_465_000 picoseconds. + Weight::from_parts(21_050_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -789,8 +772,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 46_452_000 picoseconds. - Weight::from_parts(52_780_000, 3533) + // Minimum execution time: 23_825_000 picoseconds. 
+ Weight::from_parts(26_250_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -804,10 +787,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1117` + // Measured: `1014` // Estimated: `3593` - // Minimum execution time: 64_905_000 picoseconds. - Weight::from_parts(72_914_000, 3593) + // Minimum execution time: 28_103_000 picoseconds. + Weight::from_parts(32_622_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -819,8 +802,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 38_831_000 picoseconds. - Weight::from_parts(41_420_000, 4698) + // Minimum execution time: 16_751_000 picoseconds. + Weight::from_parts(17_373_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -829,8 +812,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_595_000 picoseconds. - Weight::from_parts(4_964_606, 0) + // Minimum execution time: 2_705_000 picoseconds. + Weight::from_parts(2_991_768, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -839,58 +822,37 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 8_640_000 picoseconds. - Weight::from_parts(9_153_332, 1487) + // Minimum execution time: 4_598_000 picoseconds. 
+ Weight::from_parts(4_937_302, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Broker::RevenueInbox` (r:1 w:1) - /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `667` - // Estimated: `3593` - // Minimum execution time: 40_570_000 picoseconds. - Weight::from_parts(41_402_000, 3593) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `991` + // Estimated: `4456` + // Minimum execution time: 37_601_000 picoseconds. 
+ Weight::from_parts(38_262_000, 4456) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) - /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) - /// Storage: `Broker::Configuration` (r:1 w:0) - /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::PotentialRenewals` (r:10 w:20) - /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:10 w:10) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Broker::SaleInfo` (r:0 w:1) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:1000) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `8548` - // Estimated: `38070` - // Minimum execution time: 29_370_000 picoseconds. 
- Weight::from_parts(334_030_189, 38070) - // Standard Error: 6_912 - .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(26_u64)) - .saturating_add(RocksDbWeight::get().writes(34_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + fn rotate_sale(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 0_000 picoseconds. + Weight::from_parts(0, 0) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -900,8 +862,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_005_000 picoseconds. - Weight::from_parts(9_392_000, 3493) + // Minimum execution time: 5_391_000 picoseconds. + Weight::from_parts(5_630_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -913,8 +875,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_043_000 picoseconds. - Weight::from_parts(20_089_000, 4681) + // Minimum execution time: 10_249_000 picoseconds. + Weight::from_parts(10_529_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -922,8 +884,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_000, 0) + // Minimum execution time: 120_000 picoseconds. 
+ Weight::from_parts(140_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -931,8 +893,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_248_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_402_000 picoseconds. + Weight::from_parts(1_513_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -941,8 +903,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_413_000 picoseconds. - Weight::from_parts(2_640_000, 0) + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -951,33 +913,16 @@ impl WeightInfo for () { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Broker::RevenueInbox` (r:1 w:0) - /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `441` - // Estimated: `1516` - // Minimum execution time: 17_083_000 picoseconds. - Weight::from_parts(18_077_000, 1516) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 8_897_000 picoseconds. 
+ Weight::from_parts(9_218_000, 4068) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Broker::SaleInfo` (r:1 w:0) - /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:1) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) - /// Storage: `Broker::Status` (r:1 w:0) - /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:2) - /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) - fn force_reserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `5253` - // Estimated: `7496` - // Minimum execution time: 28_363_000 picoseconds. - Weight::from_parts(29_243_000, 7496) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -985,11 +930,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 11_620_000 picoseconds. - Weight::from_parts(12_063_000, 1526) + // Minimum execution time: 4_678_000 picoseconds. + Weight::from_parts(4_920_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. 
+ Weight::from_parts(268_000, 0) + } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -1000,35 +952,32 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `1121` + // Measured: `930` // Estimated: `4698` - // Minimum execution time: 85_270_000 picoseconds. - Weight::from_parts(90_457_000, 4698) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Minimum execution time: 51_597_000 picoseconds. + Weight::from_parts(52_609_000, 4698) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `578` + // Measured: `484` // Estimated: `1586` - // Minimum execution time: 22_479_000 picoseconds. 
- Weight::from_parts(23_687_000, 1586) + // Minimum execution time: 8_907_000 picoseconds. + Weight::from_parts(9_167_000, 1586) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 245_000 picoseconds. - Weight::from_parts(290_000, 0) - } -} +} \ No newline at end of file diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index b7d9d245892a..a250886b5e3d 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-bounties = { workspace = true } pallet-treasury = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs index 2864f3ab5048..67074f90cbf6 100644 --- a/substrate/frame/child-bounties/src/benchmarking.rs +++ b/substrate/frame/child-bounties/src/benchmarking.rs @@ -19,15 +19,17 @@ #![cfg(feature = "runtime-benchmarks")] -use alloc::vec; -use frame_benchmarking::{v2::*, BenchmarkError}; -use frame_support::ensure; -use frame_system::RawOrigin; -use pallet_bounties::Pallet as Bounties; -use pallet_treasury::Pallet as Treasury; +use super::*; + +use alloc::{vec, vec::Vec}; + +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use 
frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::BlockNumberProvider; -use crate::*; +use crate::Pallet as ChildBounties; +use pallet_bounties::Pallet as Bounties; +use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; @@ -142,7 +144,7 @@ fn activate_child_bounty( let mut bounty_setup = activate_bounty::(user, description)?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - Pallet::::add_child_bounty( + ChildBounties::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, @@ -151,7 +153,7 @@ fn activate_child_bounty( bounty_setup.child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - Pallet::::propose_curator( + ChildBounties::::propose_curator( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, @@ -159,7 +161,7 @@ fn activate_child_bounty( bounty_setup.child_bounty_fee, )?; - Pallet::::accept_curator( + ChildBounties::::accept_curator( RawOrigin::Signed(bounty_setup.child_curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, @@ -178,43 +180,26 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn add_child_bounty( - d: Linear<0, { T::MaximumReasonLength::get() }>, - ) -> Result<(), BenchmarkError> { +benchmarks! { + add_child_bounty { + let d in 0 .. 
T::MaximumReasonLength::get(); setup_pot_account::(); let bounty_setup = activate_bounty::(0, d)?; - - #[extrinsic_call] - _( - RawOrigin::Signed(bounty_setup.curator), - bounty_setup.bounty_id, - bounty_setup.child_bounty_value, - bounty_setup.reason.clone(), - ); - - assert_last_event::( - Event::Added { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - } - .into(), - ); - - Ok(()) + }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, + bounty_setup.child_bounty_value, bounty_setup.reason.clone()) + verify { + assert_last_event::(Event::Added { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + }.into()) } - #[benchmark] - fn propose_curator() -> Result<(), BenchmarkError> { + propose_curator { setup_pot_account::(); let bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - Pallet::::add_child_bounty( + ChildBounties::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, @@ -222,183 +207,118 @@ mod benchmarks { )?; let child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - #[extrinsic_call] - _( - RawOrigin::Signed(bounty_setup.curator), - bounty_setup.bounty_id, - child_bounty_id, - child_curator_lookup, - bounty_setup.child_bounty_fee, - ); - - Ok(()) - } + }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, + child_bounty_id, child_curator_lookup, bounty_setup.child_bounty_fee) - #[benchmark] - fn accept_curator() -> Result<(), BenchmarkError> { + accept_curator { setup_pot_account::(); let mut bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - Pallet::::add_child_bounty( + ChildBounties::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), 
bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - bounty_setup.child_bounty_id = - ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; + bounty_setup.child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - Pallet::::propose_curator( + ChildBounties::::propose_curator( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, child_curator_lookup, bounty_setup.child_bounty_fee, )?; - - #[extrinsic_call] - _( - RawOrigin::Signed(bounty_setup.child_curator), - bounty_setup.bounty_id, - bounty_setup.child_bounty_id, - ); - - Ok(()) - } + }: _(RawOrigin::Signed(bounty_setup.child_curator), bounty_setup.bounty_id, + bounty_setup.child_bounty_id) // Worst case when curator is inactive and any sender un-assigns the curator. - #[benchmark] - fn unassign_curator() -> Result<(), BenchmarkError> { + unassign_curator { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; Treasury::::on_initialize(frame_system::Pallet::::block_number()); set_block_number::(T::SpendPeriod::get() + T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), bounty_setup.bounty_id, + bounty_setup.child_bounty_id) - #[extrinsic_call] - _(RawOrigin::Signed(caller), bounty_setup.bounty_id, bounty_setup.child_bounty_id); - - Ok(()) - } - - #[benchmark] - fn award_child_bounty() -> Result<(), BenchmarkError> { + award_child_bounty { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - let beneficiary_account = account::("beneficiary", 0, SEED); + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); - - #[extrinsic_call] - _( - RawOrigin::Signed(bounty_setup.child_curator), - bounty_setup.bounty_id, - bounty_setup.child_bounty_id, - 
beneficiary, - ); - - assert_last_event::( - Event::Awarded { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - beneficiary: beneficiary_account, - } - .into(), - ); - - Ok(()) + }: _(RawOrigin::Signed(bounty_setup.child_curator), bounty_setup.bounty_id, + bounty_setup.child_bounty_id, beneficiary) + verify { + assert_last_event::(Event::Awarded { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + beneficiary: beneficiary_account + }.into()) } - #[benchmark] - fn claim_child_bounty() -> Result<(), BenchmarkError> { + claim_child_bounty { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - let beneficiary_account = account("beneficiary", 0, SEED); + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account); - Pallet::::award_child_bounty( + ChildBounties::::award_child_bounty( RawOrigin::Signed(bounty_setup.child_curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, - beneficiary, + beneficiary )?; - let beneficiary_account = account("beneficiary", 0, SEED); + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); set_block_number::(T::SpendPeriod::get() + T::BountyDepositPayoutDelay::get()); - ensure!( - T::Currency::free_balance(&beneficiary_account).is_zero(), - "Beneficiary already has balance." - ); - - #[extrinsic_call] - _( - RawOrigin::Signed(bounty_setup.curator), - bounty_setup.bounty_id, - bounty_setup.child_bounty_id, - ); - - ensure!( - !T::Currency::free_balance(&beneficiary_account).is_zero(), - "Beneficiary didn't get paid." 
- ); - - Ok(()) + ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), + "Beneficiary already has balance."); + + }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, + bounty_setup.child_bounty_id) + verify { + ensure!(!T::Currency::free_balance(&beneficiary_account).is_zero(), + "Beneficiary didn't get paid."); } // Best case scenario. - #[benchmark] - fn close_child_bounty_added() -> Result<(), BenchmarkError> { + close_child_bounty_added { setup_pot_account::(); let mut bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; - Pallet::::add_child_bounty( + ChildBounties::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - bounty_setup.child_bounty_id = - ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - - #[extrinsic_call] - close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id); - - assert_last_event::( - Event::Canceled { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - } - .into(), - ); - - Ok(()) + bounty_setup.child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; + + }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, + bounty_setup.child_bounty_id) + verify { + assert_last_event::(Event::Canceled { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id + }.into()) } // Worst case scenario. 
- #[benchmark] - fn close_child_bounty_active() -> Result<(), BenchmarkError> { + close_child_bounty_active { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; Treasury::::on_initialize(frame_system::Pallet::::block_number()); - - #[extrinsic_call] - close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id); - - assert_last_event::( - Event::Canceled { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - } - .into(), - ); - - Ok(()) + }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id) + verify { + assert_last_event::(Event::Canceled { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + }.into()) } - impl_benchmark_test_suite! { - Pallet, - tests::new_test_ext(), - tests::Test - } + impl_benchmark_test_suite!(ChildBounties, crate::tests::new_test_ext(), crate::tests::Test) } diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 9fca26510989..ea1d9547d465 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -79,9 +79,7 @@ use sp_runtime::{ }; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::{ - ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, -}; +use frame_system::pallet_prelude::*; use pallet_bounties::BountyStatus; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -92,8 +90,6 @@ type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -type BlockNumberFor = - <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A child bounty proposal. 
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -814,7 +810,7 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet { + impl Hooks> for Pallet { fn integrity_test() { let parent_bounty_id: BountyIndex = 1; let child_bounty_id: BountyIndex = 2; diff --git a/substrate/frame/child-bounties/src/weights.rs b/substrate/frame/child-bounties/src/weights.rs index 61bb5bca7a78..1c0583d58e02 100644 --- a/substrate/frame/child-bounties/src/weights.rs +++ b/substrate/frame/child-bounties/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_child_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,21 +70,19 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:1 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(d: u32, ) -> Weight { + fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `812` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 71_601_000 picoseconds. - Weight::from_parts(74_162_244, 6196) - // Standard Error: 328 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(d.into())) + // Minimum execution time: 65_654_000 picoseconds. 
+ Weight::from_parts(68_255_084, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -96,10 +94,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `842` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 24_835_000 picoseconds. - Weight::from_parts(26_049_000, 3642) + // Minimum execution time: 18_534_000 picoseconds. + Weight::from_parts(19_332_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -111,10 +109,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 40_409_000 picoseconds. - Weight::from_parts(41_432_000, 3642) + // Minimum execution time: 33_212_000 picoseconds. + Weight::from_parts(35_407_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,10 +124,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 49_747_000 picoseconds. - Weight::from_parts(51_222_000, 3642) + // Minimum execution time: 35_510_000 picoseconds. 
+ Weight::from_parts(36_345_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,10 +137,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `908` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 26_462_000 picoseconds. - Weight::from_parts(27_166_000, 3642) + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_094_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,14 +150,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `752` + // Measured: `682` // Estimated: `8799` - // Minimum execution time: 110_207_000 picoseconds. - Weight::from_parts(111_918_000, 8799) + // Minimum execution time: 110_529_000 picoseconds. 
+ Weight::from_parts(112_660_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -173,14 +171,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1122` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 78_217_000 picoseconds. - Weight::from_parts(79_799_000, 6196) + // Minimum execution time: 76_363_000 picoseconds. 
+ Weight::from_parts(77_799_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -194,14 +192,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1343` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 93_624_000 picoseconds. - Weight::from_parts(96_697_000, 8799) + // Minimum execution time: 89_977_000 picoseconds. 
+ Weight::from_parts(92_978_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -215,21 +213,19 @@ impl WeightInfo for () { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentTotalChildBounties` (r:1 w:1) - /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(d: u32, ) -> Weight { + fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `812` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 71_601_000 picoseconds. - Weight::from_parts(74_162_244, 6196) - // Standard Error: 328 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(d.into())) + // Minimum execution time: 65_654_000 picoseconds. 
+ Weight::from_parts(68_255_084, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -241,10 +237,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `842` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 24_835_000 picoseconds. - Weight::from_parts(26_049_000, 3642) + // Minimum execution time: 18_534_000 picoseconds. + Weight::from_parts(19_332_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -256,10 +252,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 40_409_000 picoseconds. - Weight::from_parts(41_432_000, 3642) + // Minimum execution time: 33_212_000 picoseconds. + Weight::from_parts(35_407_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -271,10 +267,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 49_747_000 picoseconds. - Weight::from_parts(51_222_000, 3642) + // Minimum execution time: 35_510_000 picoseconds. 
+ Weight::from_parts(36_345_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -284,10 +280,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `908` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 26_462_000 picoseconds. - Weight::from_parts(27_166_000, 3642) + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_094_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -297,14 +293,14 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `752` + // Measured: `682` // Estimated: `8799` - // Minimum execution time: 110_207_000 picoseconds. - Weight::from_parts(111_918_000, 8799) + // Minimum execution time: 110_529_000 picoseconds. 
+ Weight::from_parts(112_660_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -318,14 +314,14 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1122` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 78_217_000 picoseconds. - Weight::from_parts(79_799_000, 6196) + // Minimum execution time: 76_363_000 picoseconds. 
+ Weight::from_parts(77_799_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -339,14 +335,14 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1343` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 93_624_000 picoseconds. - Weight::from_parts(96_697_000, 8799) + // Minimum execution time: 89_977_000 picoseconds. 
+ Weight::from_parts(92_978_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index 8e53000352ae..59a9d23f7b19 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/collective/src/weights.rs b/substrate/frame/collective/src/weights.rs index 4d47d2fe9ead..1a7485b4ab7b 100644 --- a/substrate/frame/collective/src/weights.rs +++ b/substrate/frame/collective/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-09-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_collective -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/collective/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_collective +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/collective/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -82,13 +80,13 @@ impl WeightInfo for SubstrateWeight { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15927 + m * (1967 ±24) + p * (4332 ±24)` - // Minimum execution time: 16_292_000 picoseconds. - Weight::from_parts(16_707_000, 15927) - // Standard Error: 65_976 - .saturating_add(Weight::from_parts(4_766_715, 0).saturating_mul(m.into())) - // Standard Error: 65_976 - .saturating_add(Weight::from_parts(9_280_562, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 16_699_000 picoseconds. + Weight::from_parts(17_015_000, 15894) + // Standard Error: 63_844 + .saturating_add(Weight::from_parts(4_593_256, 0).saturating_mul(m.into())) + // Standard Error: 63_844 + .saturating_add(Weight::from_parts(8_935_845, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -106,14 +104,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. 
fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `413 + m * (32 ±0)` + // Measured: `380 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 24_281_000 picoseconds. - Weight::from_parts(23_568_200, 3997) - // Standard Error: 47 - .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(b.into())) - // Standard Error: 492 - .saturating_add(Weight::from_parts(15_851, 0).saturating_mul(m.into())) + // Minimum execution time: 22_010_000 picoseconds. + Weight::from_parts(21_392_812, 3997) + // Standard Error: 34 + .saturating_add(Weight::from_parts(1_533, 0).saturating_mul(b.into())) + // Standard Error: 354 + .saturating_add(Weight::from_parts(15_866, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -129,14 +127,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `413 + m * (32 ±0)` + // Measured: `380 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 26_424_000 picoseconds. - Weight::from_parts(26_130_784, 3997) - // Standard Error: 56 - .saturating_add(Weight::from_parts(1_577, 0).saturating_mul(b.into())) - // Standard Error: 585 - .saturating_add(Weight::from_parts(20_984, 0).saturating_mul(m.into())) + // Minimum execution time: 24_250_000 picoseconds. 
+ Weight::from_parts(23_545_893, 3997) + // Standard Error: 40 + .saturating_add(Weight::from_parts(1_646, 0).saturating_mul(b.into())) + // Standard Error: 421 + .saturating_add(Weight::from_parts(26_248, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -147,7 +145,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) /// Storage: `Council::ProposalCount` (r:1 w:1) /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -159,16 +157,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `651 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `4024 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 47_547_000 picoseconds. - Weight::from_parts(65_808_006, 4024) - // Standard Error: 330 - .saturating_add(Weight::from_parts(4_211, 0).saturating_mul(b.into())) - // Standard Error: 3_443 - .saturating_add(Weight::from_parts(43_705, 0).saturating_mul(m.into())) - // Standard Error: 3_399 - .saturating_add(Weight::from_parts(235_928, 0).saturating_mul(p.into())) + // Measured: `618 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3991 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 46_538_000 picoseconds. 
+ Weight::from_parts(63_900_448, 3991) + // Standard Error: 350 + .saturating_add(Weight::from_parts(2_827, 0).saturating_mul(b.into())) + // Standard Error: 3_658 + .saturating_add(Weight::from_parts(53_340, 0).saturating_mul(m.into())) + // Standard Error: 3_611 + .saturating_add(Weight::from_parts(213_719, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -181,12 +179,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1044 + m * (64 ±0)` - // Estimated: `4508 + m * (64 ±0)` - // Minimum execution time: 32_388_000 picoseconds. - Weight::from_parts(34_955_946, 4508) - // Standard Error: 2_253 - .saturating_add(Weight::from_parts(34_184, 0).saturating_mul(m.into())) + // Measured: `1011 + m * (64 ±0)` + // Estimated: `4475 + m * (64 ±0)` + // Minimum execution time: 28_413_000 picoseconds. + Weight::from_parts(28_981_832, 4475) + // Standard Error: 665 + .saturating_add(Weight::from_parts(43_005, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -203,14 +201,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4075 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 29_663_000 picoseconds. 
- Weight::from_parts(33_355_561, 4075) - // Standard Error: 2_045 - .saturating_add(Weight::from_parts(28_190, 0).saturating_mul(m.into())) - // Standard Error: 1_994 - .saturating_add(Weight::from_parts(185_801, 0).saturating_mul(p.into())) + // Measured: `600 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4042 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 27_725_000 picoseconds. + Weight::from_parts(30_174_093, 4042) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(41_100, 0).saturating_mul(m.into())) + // Standard Error: 1_422 + .saturating_add(Weight::from_parts(177_303, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -233,16 +231,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1080 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4393 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 46_764_000 picoseconds. - Weight::from_parts(49_084_241, 4393) - // Standard Error: 284 - .saturating_add(Weight::from_parts(3_771, 0).saturating_mul(b.into())) - // Standard Error: 3_003 - .saturating_add(Weight::from_parts(33_189, 0).saturating_mul(m.into())) - // Standard Error: 2_927 - .saturating_add(Weight::from_parts(245_387, 0).saturating_mul(p.into())) + // Measured: `1047 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4360 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 48_882_000 picoseconds. 
+ Weight::from_parts(51_938_773, 4360) + // Standard Error: 208 + .saturating_add(Weight::from_parts(3_559, 0).saturating_mul(b.into())) + // Standard Error: 2_201 + .saturating_add(Weight::from_parts(38_678, 0).saturating_mul(m.into())) + // Standard Error: 2_145 + .saturating_add(Weight::from_parts(214_061, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -263,14 +261,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `653 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4095 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 32_188_000 picoseconds. - Weight::from_parts(35_015_624, 4095) - // Standard Error: 2_283 - .saturating_add(Weight::from_parts(39_633, 0).saturating_mul(m.into())) - // Standard Error: 2_226 - .saturating_add(Weight::from_parts(191_898, 0).saturating_mul(p.into())) + // Measured: `620 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4062 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 30_613_000 picoseconds. + Weight::from_parts(36_174_190, 4062) + // Standard Error: 1_899 + .saturating_add(Weight::from_parts(46_781, 0).saturating_mul(m.into())) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(185_875, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -295,16 +293,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1100 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4413 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 49_281_000 picoseconds. - Weight::from_parts(53_838_013, 4413) - // Standard Error: 317 - .saturating_add(Weight::from_parts(4_011, 0).saturating_mul(b.into())) - // Standard Error: 3_353 - .saturating_add(Weight::from_parts(19_609, 0).saturating_mul(m.into())) - // Standard Error: 3_269 - .saturating_add(Weight::from_parts(236_964, 0).saturating_mul(p.into())) + // Measured: `1067 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4380 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 51_253_000 picoseconds. + Weight::from_parts(56_399_941, 4380) + // Standard Error: 218 + .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(b.into())) + // Standard Error: 2_310 + .saturating_add(Weight::from_parts(30_473, 0).saturating_mul(m.into())) + // Standard Error: 2_252 + .saturating_add(Weight::from_parts(208_468, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -320,12 +318,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `425 + p * (32 ±0)` - // Estimated: `1910 + p * (32 ±0)` - // Minimum execution time: 14_767_000 picoseconds. - Weight::from_parts(16_823_844, 1910) - // Standard Error: 1_424 - .saturating_add(Weight::from_parts(170_583, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 ±0)` + // Estimated: `1877 + p * (32 ±0)` + // Minimum execution time: 14_646_000 picoseconds. 
+ Weight::from_parts(17_305_497, 1877) + // Standard Error: 1_331 + .saturating_add(Weight::from_parts(156_038, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) @@ -337,7 +335,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -346,19 +344,19 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn kill(d: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1896 + d * (212 ±0) + p * (41 ±0)` - // Estimated: `5205 + d * (1910 ±14) + p * (43 ±0)` - // Minimum execution time: 24_956_000 picoseconds. - Weight::from_parts(25_382_488, 5205) - // Standard Error: 374_961 - .saturating_add(Weight::from_parts(31_856_043, 0).saturating_mul(d.into())) - // Standard Error: 5_806 - .saturating_add(Weight::from_parts(288_259, 0).saturating_mul(p.into())) + // Measured: `1863 + d * (212 ±0) + p * (41 ±0)` + // Estimated: `5172 + d * (1901 ±14) + p * (43 ±0)` + // Minimum execution time: 22_164_000 picoseconds. 
+ Weight::from_parts(24_932_256, 5172) + // Standard Error: 404_014 + .saturating_add(Weight::from_parts(33_833_807, 0).saturating_mul(d.into())) + // Standard Error: 6_256 + .saturating_add(Weight::from_parts(281_910, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(d.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) - .saturating_add(Weight::from_parts(0, 1910).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 1901).saturating_mul(d.into())) .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) } /// Storage: `Council::ProposalOf` (r:1 w:0) @@ -368,13 +366,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) fn release_proposal_cost() -> Weight { // Proof Size summary in bytes: - // Measured: `1997` - // Estimated: `5462` - // Minimum execution time: 67_153_000 picoseconds. - Weight::from_parts(70_174_000, 5462) + // Measured: `1964` + // Estimated: `5429` + // Minimum execution time: 69_220_000 picoseconds. + Weight::from_parts(70_215_000, 5429) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -396,13 +394,13 @@ impl WeightInfo for () { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15927 + m * (1967 ±24) + p * (4332 ±24)` - // Minimum execution time: 16_292_000 picoseconds. 
- Weight::from_parts(16_707_000, 15927) - // Standard Error: 65_976 - .saturating_add(Weight::from_parts(4_766_715, 0).saturating_mul(m.into())) - // Standard Error: 65_976 - .saturating_add(Weight::from_parts(9_280_562, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 16_699_000 picoseconds. + Weight::from_parts(17_015_000, 15894) + // Standard Error: 63_844 + .saturating_add(Weight::from_parts(4_593_256, 0).saturating_mul(m.into())) + // Standard Error: 63_844 + .saturating_add(Weight::from_parts(8_935_845, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -420,14 +418,14 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `413 + m * (32 ±0)` + // Measured: `380 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 24_281_000 picoseconds. - Weight::from_parts(23_568_200, 3997) - // Standard Error: 47 - .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(b.into())) - // Standard Error: 492 - .saturating_add(Weight::from_parts(15_851, 0).saturating_mul(m.into())) + // Minimum execution time: 22_010_000 picoseconds. + Weight::from_parts(21_392_812, 3997) + // Standard Error: 34 + .saturating_add(Weight::from_parts(1_533, 0).saturating_mul(b.into())) + // Standard Error: 354 + .saturating_add(Weight::from_parts(15_866, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -443,14 +441,14 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. 
fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `413 + m * (32 ±0)` + // Measured: `380 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 26_424_000 picoseconds. - Weight::from_parts(26_130_784, 3997) - // Standard Error: 56 - .saturating_add(Weight::from_parts(1_577, 0).saturating_mul(b.into())) - // Standard Error: 585 - .saturating_add(Weight::from_parts(20_984, 0).saturating_mul(m.into())) + // Minimum execution time: 24_250_000 picoseconds. + Weight::from_parts(23_545_893, 3997) + // Standard Error: 40 + .saturating_add(Weight::from_parts(1_646, 0).saturating_mul(b.into())) + // Standard Error: 421 + .saturating_add(Weight::from_parts(26_248, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -461,7 +459,7 @@ impl WeightInfo for () { /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) /// Storage: `Council::ProposalCount` (r:1 w:1) /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -473,16 +471,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `651 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `4024 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 47_547_000 picoseconds. 
- Weight::from_parts(65_808_006, 4024) - // Standard Error: 330 - .saturating_add(Weight::from_parts(4_211, 0).saturating_mul(b.into())) - // Standard Error: 3_443 - .saturating_add(Weight::from_parts(43_705, 0).saturating_mul(m.into())) - // Standard Error: 3_399 - .saturating_add(Weight::from_parts(235_928, 0).saturating_mul(p.into())) + // Measured: `618 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3991 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 46_538_000 picoseconds. + Weight::from_parts(63_900_448, 3991) + // Standard Error: 350 + .saturating_add(Weight::from_parts(2_827, 0).saturating_mul(b.into())) + // Standard Error: 3_658 + .saturating_add(Weight::from_parts(53_340, 0).saturating_mul(m.into())) + // Standard Error: 3_611 + .saturating_add(Weight::from_parts(213_719, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -495,12 +493,12 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1044 + m * (64 ±0)` - // Estimated: `4508 + m * (64 ±0)` - // Minimum execution time: 32_388_000 picoseconds. - Weight::from_parts(34_955_946, 4508) - // Standard Error: 2_253 - .saturating_add(Weight::from_parts(34_184, 0).saturating_mul(m.into())) + // Measured: `1011 + m * (64 ±0)` + // Estimated: `4475 + m * (64 ±0)` + // Minimum execution time: 28_413_000 picoseconds. + Weight::from_parts(28_981_832, 4475) + // Standard Error: 665 + .saturating_add(Weight::from_parts(43_005, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -517,14 +515,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4075 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 29_663_000 picoseconds. - Weight::from_parts(33_355_561, 4075) - // Standard Error: 2_045 - .saturating_add(Weight::from_parts(28_190, 0).saturating_mul(m.into())) - // Standard Error: 1_994 - .saturating_add(Weight::from_parts(185_801, 0).saturating_mul(p.into())) + // Measured: `600 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4042 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 27_725_000 picoseconds. + Weight::from_parts(30_174_093, 4042) + // Standard Error: 1_458 + .saturating_add(Weight::from_parts(41_100, 0).saturating_mul(m.into())) + // Standard Error: 1_422 + .saturating_add(Weight::from_parts(177_303, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -547,16 +545,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1080 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4393 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 46_764_000 picoseconds. - Weight::from_parts(49_084_241, 4393) - // Standard Error: 284 - .saturating_add(Weight::from_parts(3_771, 0).saturating_mul(b.into())) - // Standard Error: 3_003 - .saturating_add(Weight::from_parts(33_189, 0).saturating_mul(m.into())) - // Standard Error: 2_927 - .saturating_add(Weight::from_parts(245_387, 0).saturating_mul(p.into())) + // Measured: `1047 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4360 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 48_882_000 picoseconds. 
+ Weight::from_parts(51_938_773, 4360) + // Standard Error: 208 + .saturating_add(Weight::from_parts(3_559, 0).saturating_mul(b.into())) + // Standard Error: 2_201 + .saturating_add(Weight::from_parts(38_678, 0).saturating_mul(m.into())) + // Standard Error: 2_145 + .saturating_add(Weight::from_parts(214_061, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -577,14 +575,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `653 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4095 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 32_188_000 picoseconds. - Weight::from_parts(35_015_624, 4095) - // Standard Error: 2_283 - .saturating_add(Weight::from_parts(39_633, 0).saturating_mul(m.into())) - // Standard Error: 2_226 - .saturating_add(Weight::from_parts(191_898, 0).saturating_mul(p.into())) + // Measured: `620 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4062 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 30_613_000 picoseconds. + Weight::from_parts(36_174_190, 4062) + // Standard Error: 1_899 + .saturating_add(Weight::from_parts(46_781, 0).saturating_mul(m.into())) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(185_875, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -609,16 +607,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. 
fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1100 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4413 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 49_281_000 picoseconds. - Weight::from_parts(53_838_013, 4413) - // Standard Error: 317 - .saturating_add(Weight::from_parts(4_011, 0).saturating_mul(b.into())) - // Standard Error: 3_353 - .saturating_add(Weight::from_parts(19_609, 0).saturating_mul(m.into())) - // Standard Error: 3_269 - .saturating_add(Weight::from_parts(236_964, 0).saturating_mul(p.into())) + // Measured: `1067 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4380 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 51_253_000 picoseconds. + Weight::from_parts(56_399_941, 4380) + // Standard Error: 218 + .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(b.into())) + // Standard Error: 2_310 + .saturating_add(Weight::from_parts(30_473, 0).saturating_mul(m.into())) + // Standard Error: 2_252 + .saturating_add(Weight::from_parts(208_468, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -634,12 +632,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `425 + p * (32 ±0)` - // Estimated: `1910 + p * (32 ±0)` - // Minimum execution time: 14_767_000 picoseconds. - Weight::from_parts(16_823_844, 1910) - // Standard Error: 1_424 - .saturating_add(Weight::from_parts(170_583, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 ±0)` + // Estimated: `1877 + p * (32 ±0)` + // Minimum execution time: 14_646_000 picoseconds. 
+ Weight::from_parts(17_305_497, 1877) + // Standard Error: 1_331 + .saturating_add(Weight::from_parts(156_038, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) @@ -651,7 +649,7 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -660,19 +658,19 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn kill(d: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1896 + d * (212 ±0) + p * (41 ±0)` - // Estimated: `5205 + d * (1910 ±14) + p * (43 ±0)` - // Minimum execution time: 24_956_000 picoseconds. - Weight::from_parts(25_382_488, 5205) - // Standard Error: 374_961 - .saturating_add(Weight::from_parts(31_856_043, 0).saturating_mul(d.into())) - // Standard Error: 5_806 - .saturating_add(Weight::from_parts(288_259, 0).saturating_mul(p.into())) + // Measured: `1863 + d * (212 ±0) + p * (41 ±0)` + // Estimated: `5172 + d * (1901 ±14) + p * (43 ±0)` + // Minimum execution time: 22_164_000 picoseconds. 
+ Weight::from_parts(24_932_256, 5172) + // Standard Error: 404_014 + .saturating_add(Weight::from_parts(33_833_807, 0).saturating_mul(d.into())) + // Standard Error: 6_256 + .saturating_add(Weight::from_parts(281_910, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(d.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) - .saturating_add(Weight::from_parts(0, 1910).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 1901).saturating_mul(d.into())) .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) } /// Storage: `Council::ProposalOf` (r:1 w:0) @@ -682,13 +680,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) fn release_proposal_cost() -> Weight { // Proof Size summary in bytes: - // Measured: `1997` - // Estimated: `5462` - // Minimum execution time: 67_153_000 picoseconds. - Weight::from_parts(70_174_000, 5462) + // Measured: `1964` + // Estimated: `5429` + // Minimum execution time: 69_220_000 picoseconds. 
+ Weight::from_parts(70_215_000, 5429) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index e39128639e3e..316ea6813048 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,25 +18,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +paste = { workspace = true } bitflags = { workspace = true } codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } -paste = { workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = { features = [ "const_generics", ], workspace = true } wasmi = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Only used in benchmarking to generate contract code +wasm-instrument = { optional = true, workspace = true } rand = { optional = true, workspace = true } rand_pcg = { optional = true, workspace = true } -wasm-instrument = { optional = true, workspace = true } # Substrate Dependencies environmental = { workspace = true } @@ -44,8 +44,8 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-balances = { optional = true, workspace = true } -pallet-contracts-proc-macro = { workspace = true, default-features = true } pallet-contracts-uapi = { workspace = true, default-features = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } sp-api = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } @@ -58,21 +58,21 @@ xcm-builder = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } 
assert_matches = { workspace = true } -pallet-contracts-fixtures = { workspace = true } pretty_assertions = { workspace = true } wat = { workspace = true } +pallet-contracts-fixtures = { workspace = true } # Polkadot Dependencies xcm-builder = { workspace = true, default-features = true } # Substrate Dependencies -pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } pallet-utility = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } @@ -119,7 +119,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "wasm-instrument", "xcm-builder/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index cf31f9eccc9c..4c01c1f061b7 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,13 +11,13 @@ description = "Fixtures for testing contracts pallet." 
workspace = true [dependencies] -anyhow = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +anyhow = { workspace = true, default-features = true } [build-dependencies] -anyhow = { workspace = true, default-features = true } parity-wasm = { workspace = true } tempfile = { workspace = true } toml = { workspace = true } twox-hash = { workspace = true, default-features = true } +anyhow = { workspace = true, default-features = true } diff --git a/substrate/frame/contracts/fixtures/build/Cargo.toml b/substrate/frame/contracts/fixtures/build/Cargo.toml index 18e8c2767d5f..ba487a2bb5ca 100644 --- a/substrate/frame/contracts/fixtures/build/Cargo.toml +++ b/substrate/frame/contracts/fixtures/build/Cargo.toml @@ -8,9 +8,9 @@ edition = "2021" # All paths or versions are injected dynamically by the build script. [dependencies] +uapi = { package = 'pallet-contracts-uapi', path = "", default-features = false } common = { package = 'pallet-contracts-fixtures-common', path = "" } polkavm-derive = { version = "" } -uapi = { package = 'pallet-contracts-uapi', path = "", default-features = false } [profile.release] opt-level = 3 diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index a7423b33abc1..d6e2d51ef452 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -19,8 +19,8 @@ frame-system = { workspace = true } pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-contracts = { workspace = true, default-features = true } -pallet-contracts-proc-macro = { workspace = true, default-features = true } pallet-contracts-uapi = { workspace = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } pallet-insecure-randomness-collective-flip = 
{ workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } pallet-proxy = { workspace = true, default-features = true } @@ -44,8 +44,8 @@ xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } -pallet-contracts-fixtures = { workspace = true } pretty_assertions = { workspace = true } +pallet-contracts-fixtures = { workspace = true } [features] default = ["std"] @@ -87,5 +87,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index b01d0aa4fa48..c3b6e3273f34 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -399,7 +399,6 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; - type BlockNumberProvider = frame_system::Pallet; } impl pallet_dummy::Config for Test {} diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index f6c56468e5de..25b36fc404fe 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_contracts -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/contracts/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_contracts +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/contracts/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -143,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_809_000 picoseconds. - Weight::from_parts(2_956_000, 1627) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_986_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 17_559_000 picoseconds. - Weight::from_parts(17_850_000, 442) - // Standard Error: 2_722 - .saturating_add(Weight::from_parts(1_376_892, 0).saturating_mul(k.into())) + // Minimum execution time: 11_103_000 picoseconds. 
+ Weight::from_parts(11_326_000, 442) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +169,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_830_000 picoseconds. - Weight::from_parts(6_649_003, 6149) + // Minimum execution time: 7_783_000 picoseconds. + Weight::from_parts(4_462_075, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_676, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 21_927_000 picoseconds. - Weight::from_parts(22_655_000, 6450) + // Minimum execution time: 15_971_000 picoseconds. + Weight::from_parts(16_730_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +199,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 4_465_000 picoseconds. - Weight::from_parts(4_774_000, 3635) - // Standard Error: 867 - .saturating_add(Weight::from_parts(1_071_462, 0).saturating_mul(k.into())) + // Minimum execution time: 3_149_000 picoseconds. 
+ Weight::from_parts(3_264_000, 3635) + // Standard Error: 559 + .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -223,10 +221,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 21_627_000 picoseconds. - Weight::from_parts(21_491_424, 6263) + // Minimum execution time: 15_072_000 picoseconds. + Weight::from_parts(15_721_891, 6263) // Standard Error: 2 - .saturating_add(Weight::from_parts(480, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -237,8 +235,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 17_262_000 picoseconds. - Weight::from_parts(17_785_000, 6380) + // Minimum execution time: 12_047_000 picoseconds. 
+ Weight::from_parts(12_500_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,13 +245,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 52_303_000 picoseconds. - Weight::from_parts(53_902_000, 6292) + // Minimum execution time: 47_488_000 picoseconds. + Weight::from_parts(48_482_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,8 +263,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_585_000 picoseconds. - Weight::from_parts(60_478_000, 6534) + // Minimum execution time: 52_801_000 picoseconds. + Weight::from_parts(54_230_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -276,8 +274,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 16_673_000 picoseconds. - Weight::from_parts(17_325_000, 6349) + // Minimum execution time: 11_618_000 picoseconds. 
+ Weight::from_parts(12_068_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +285,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 3_073_000 picoseconds. - Weight::from_parts(3_262_000, 1627) + // Minimum execution time: 2_131_000 picoseconds. + Weight::from_parts(2_255_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +298,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_687_000 picoseconds. - Weight::from_parts(12_178_000, 3631) + // Minimum execution time: 10_773_000 picoseconds. + Weight::from_parts(11_118_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -311,8 +309,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_553_000 picoseconds. - Weight::from_parts(4_826_000, 3607) + // Minimum execution time: 4_371_000 picoseconds. + Weight::from_parts(4_624_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -323,8 +321,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_794_000 picoseconds. - Weight::from_parts(6_959_000, 3632) + // Minimum execution time: 5_612_000 picoseconds. 
+ Weight::from_parts(5_838_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -335,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_120_000 picoseconds. - Weight::from_parts(6_420_000, 3607) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_693_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -356,11 +354,11 @@ impl WeightInfo for SubstrateWeight { fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` - // Estimated: `4268 + c * (1 ±0)` - // Minimum execution time: 266_424_000 picoseconds. - Weight::from_parts(283_325_502, 4268) - // Standard Error: 12 - .saturating_add(Weight::from_parts(950, 0).saturating_mul(c.into())) + // Estimated: `4266 + c * (1 ±0)` + // Minimum execution time: 247_545_000 picoseconds. 
+ Weight::from_parts(268_016_699, 4266) + // Standard Error: 4 + .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -370,7 +368,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -387,15 +385,15 @@ impl WeightInfo for SubstrateWeight { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `323` - // Estimated: `6267` - // Minimum execution time: 4_371_315_000 picoseconds. - Weight::from_parts(4_739_462_000, 6267) - // Standard Error: 329 - .saturating_add(Weight::from_parts(38_518, 0).saturating_mul(c.into())) - // Standard Error: 39 - .saturating_add(Weight::from_parts(605, 0).saturating_mul(i.into())) - // Standard Error: 39 - .saturating_add(Weight::from_parts(561, 0).saturating_mul(s.into())) + // Estimated: `6262` + // Minimum execution time: 4_396_772_000 picoseconds. 
+ Weight::from_parts(235_107_907, 6262) + // Standard Error: 185 + .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -414,19 +412,19 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4016` - // Minimum execution time: 2_304_531_000 picoseconds. - Weight::from_parts(2_352_810_000, 4016) - // Standard Error: 35 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(i.into())) - // Standard Error: 35 - .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) + // Estimated: `4017` + // Minimum execution time: 2_240_868_000 picoseconds. + Weight::from_parts(2_273_668_000, 4017) + // Standard Error: 32 + .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) + // Standard Error: 32 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -446,8 +444,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 183_658_000 picoseconds. 
- Weight::from_parts(189_507_000, 4291) + // Minimum execution time: 165_067_000 picoseconds. + Weight::from_parts(168_582_000, 4291) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -456,7 +454,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -464,10 +462,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 253_006_000 picoseconds. - Weight::from_parts(269_271_744, 3607) - // Standard Error: 79 - .saturating_add(Weight::from_parts(49_970, 0).saturating_mul(c.into())) + // Minimum execution time: 229_454_000 picoseconds. 
+ Weight::from_parts(251_495_551, 3607) + // Standard Error: 71 + .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -476,7 +474,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -484,10 +482,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 247_567_000 picoseconds. - Weight::from_parts(271_875_922, 3607) - // Standard Error: 78 - .saturating_add(Weight::from_parts(50_117, 0).saturating_mul(c.into())) + // Minimum execution time: 240_390_000 picoseconds. 
+ Weight::from_parts(273_854_266, 3607) + // Standard Error: 243 + .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -496,15 +494,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 48_151_000 picoseconds. - Weight::from_parts(49_407_000, 3780) + // Minimum execution time: 39_374_000 picoseconds. + Weight::from_parts(40_247_000, 3780) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -518,8 +516,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 30_173_000 picoseconds. - Weight::from_parts(30_941_000, 6492) + // Minimum execution time: 24_473_000 picoseconds. + Weight::from_parts(25_890_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -528,17 +526,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_350_000 picoseconds. 
- Weight::from_parts(9_238_867, 0) - // Standard Error: 139 - .saturating_add(Weight::from_parts(52_355, 0).saturating_mul(r.into())) + // Minimum execution time: 8_528_000 picoseconds. + Weight::from_parts(9_301_010, 0) + // Standard Error: 98 + .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 757_000 picoseconds. - Weight::from_parts(827_000, 0) + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(678_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -546,8 +544,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 12_202_000 picoseconds. - Weight::from_parts(12_708_000, 3819) + // Minimum execution time: 6_107_000 picoseconds. + Weight::from_parts(6_235_000, 3819) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -556,106 +554,109 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 13_492_000 picoseconds. - Weight::from_parts(13_845_000, 3912) + // Minimum execution time: 7_316_000 picoseconds. + Weight::from_parts(7_653_000, 3912) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798_000 picoseconds. - Weight::from_parts(856_000, 0) + // Minimum execution time: 721_000 picoseconds. + Weight::from_parts(764_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 364_000 picoseconds. 
- Weight::from_parts(414_000, 0) + // Minimum execution time: 369_000 picoseconds. + Weight::from_parts(417_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 355_000 picoseconds. - Weight::from_parts(396_000, 0) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(349_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(719_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(628_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 770_000 picoseconds. - Weight::from_parts(827_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 5_839_000 picoseconds. - Weight::from_parts(6_174_000, 0) + // Minimum execution time: 4_361_000 picoseconds. + Weight::from_parts(4_577_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 681_000 picoseconds. - Weight::from_parts(757_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(603_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(610_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 654_000 picoseconds. - Weight::from_parts(713_000, 0) + // Minimum execution time: 557_000 picoseconds. 
+ Weight::from_parts(583_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 707_000 picoseconds. - Weight::from_parts(752_000, 0) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(602_000, 0) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_562_000 picoseconds. - Weight::from_parts(1_749_000, 0) + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 4_065_000 picoseconds. + Weight::from_parts(4_291_000, 1552) + .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 483_000 picoseconds. - Weight::from_parts(536_000, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) + // Minimum execution time: 487_000 picoseconds. + Weight::from_parts(517_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 372_000 picoseconds. - Weight::from_parts(384_000, 0) - // Standard Error: 11 - .saturating_add(Weight::from_parts(433, 0).saturating_mul(n.into())) + // Minimum execution time: 318_000 picoseconds. 
+ Weight::from_parts(372_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -668,10 +669,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_544_934, 3784) - // Standard Error: 9_422 - .saturating_add(Weight::from_parts(4_431_910, 0).saturating_mul(n.into())) + // Minimum execution time: 13_251_000 picoseconds. + Weight::from_parts(15_257_892, 3784) + // Standard Error: 7_089 + .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -684,8 +685,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 4_503_000 picoseconds. - Weight::from_parts(4_743_000, 1561) + // Minimum execution time: 3_434_000 picoseconds. + Weight::from_parts(3_605_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -696,12 +697,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_838_000 picoseconds. - Weight::from_parts(4_110_930, 990) - // Standard Error: 6_782 - .saturating_add(Weight::from_parts(2_241_357, 0).saturating_mul(t.into())) + // Minimum execution time: 3_668_000 picoseconds. 
+ Weight::from_parts(3_999_591, 990) + // Standard Error: 5_767 + .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -711,10 +712,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 506_000 picoseconds. - Weight::from_parts(526_000, 0) - // Standard Error: 11 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(i.into())) + // Minimum execution time: 443_000 picoseconds. + Weight::from_parts(472_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -722,8 +723,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 16_531_000 picoseconds. - Weight::from_parts(16_947_000, 16618) + // Minimum execution time: 13_752_000 picoseconds. + Weight::from_parts(14_356_000, 16618) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -732,8 +733,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 57_673_000 picoseconds. - Weight::from_parts(63_131_000, 26628) + // Minimum execution time: 43_444_000 picoseconds. 
+ Weight::from_parts(45_087_000, 26628) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -742,8 +743,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 18_388_000 picoseconds. - Weight::from_parts(18_882_000, 16618) + // Minimum execution time: 15_616_000 picoseconds. + Weight::from_parts(16_010_000, 16618) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -753,8 +754,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 62_048_000 picoseconds. - Weight::from_parts(71_685_000, 26628) + // Minimum execution time: 47_020_000 picoseconds. + Weight::from_parts(50_152_000, 26628) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -766,12 +767,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 11_886_000 picoseconds. - Weight::from_parts(11_100_121, 249) - // Standard Error: 2 - .saturating_add(Weight::from_parts(258, 0).saturating_mul(n.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(91, 0).saturating_mul(o.into())) + // Minimum execution time: 8_824_000 picoseconds. 
+ Weight::from_parts(8_915_233, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -783,10 +784,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 9_576_000 picoseconds. - Weight::from_parts(10_418_109, 248) + // Minimum execution time: 7_133_000 picoseconds. + Weight::from_parts(7_912_778, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(115, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -798,10 +799,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 8_903_000 picoseconds. - Weight::from_parts(10_108_260, 248) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_647_236, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(626, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -812,10 +813,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 8_216_000 picoseconds. - Weight::from_parts(9_267_036, 248) + // Minimum execution time: 6_247_000 picoseconds. 
+ Weight::from_parts(6_952_661, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(103, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -826,10 +827,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 9_713_000 picoseconds. - Weight::from_parts(10_998_797, 248) + // Minimum execution time: 7_428_000 picoseconds. + Weight::from_parts(8_384_015, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(639, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -838,36 +839,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_521_000 picoseconds. - Weight::from_parts(1_612_000, 0) + // Minimum execution time: 1_478_000 picoseconds. + Weight::from_parts(1_533_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_866_000 picoseconds. - Weight::from_parts(3_150_000, 0) + // Minimum execution time: 2_485_000 picoseconds. + Weight::from_parts(2_728_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_200_000 picoseconds. - Weight::from_parts(3_373_000, 0) + // Minimum execution time: 3_195_000 picoseconds. 
+ Weight::from_parts(3_811_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_138_000 picoseconds. - Weight::from_parts(4_488_000, 0) + // Minimum execution time: 3_902_000 picoseconds. + Weight::from_parts(4_118_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_594_000 picoseconds. - Weight::from_parts(1_799_000, 0) + // Minimum execution time: 1_571_000 picoseconds. + Weight::from_parts(1_662_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -875,57 +876,57 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_811_000 picoseconds. - Weight::from_parts(2_851_992, 0) + // Minimum execution time: 5_250_000 picoseconds. + Weight::from_parts(2_465_568, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(222, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_335_000 picoseconds. - Weight::from_parts(2_661_318, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(234, 0).saturating_mul(n.into())) + // Minimum execution time: 2_012_000 picoseconds. + Weight::from_parts(2_288_004, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. 
fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_189_000 picoseconds. - Weight::from_parts(2_487_605, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(220, 0).saturating_mul(n.into())) + // Minimum execution time: 1_906_000 picoseconds. + Weight::from_parts(2_121_040, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_831_000 picoseconds. - Weight::from_parts(2_071_548, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(134, 0).saturating_mul(n.into())) + // Minimum execution time: 1_736_000 picoseconds. + Weight::from_parts(1_954_728, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_106_000 picoseconds. - Weight::from_parts(8_556_699, 0) + // Minimum execution time: 7_872_000 picoseconds. + Weight::from_parts(8_125_644, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 10_433_000 picoseconds. - Weight::from_parts(10_873_000, 0) + // Minimum execution time: 8_489_000 picoseconds. 
+ Weight::from_parts(8_791_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -941,12 +942,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 140_018_000 picoseconds. - Weight::from_parts(142_816_362, 4085) - // Standard Error: 187_348 - .saturating_add(Weight::from_parts(42_978_763, 0).saturating_mul(t.into())) + // Minimum execution time: 122_759_000 picoseconds. + Weight::from_parts(120_016_020, 4085) + // Standard Error: 173_118 + .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -961,8 +962,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 130_708_000 picoseconds. - Weight::from_parts(134_865_000, 3895) + // Minimum execution time: 111_566_000 picoseconds. + Weight::from_parts(115_083_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -981,12 +982,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_891_181_000 picoseconds. - Weight::from_parts(1_901_270_000, 4132) - // Standard Error: 26 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(i.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(s.into())) + // Minimum execution time: 1_871_402_000 picoseconds. 
+ Weight::from_parts(1_890_038_000, 4132) + // Standard Error: 24 + .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -995,64 +996,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(12_708_667, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(9_599_151, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(12_527_035, 0) + // Minimum execution time: 1_416_000 picoseconds. + Weight::from_parts(10_964_255, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_526, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787_000 picoseconds. - Weight::from_parts(8_175_079, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(6_579_283, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_460, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807_000 picoseconds. - Weight::from_parts(6_418_831, 0) + // Minimum execution time: 773_000 picoseconds. + Weight::from_parts(10_990_209, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 49_651_000 picoseconds. - Weight::from_parts(48_834_618, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_221, 0).saturating_mul(n.into())) + // Minimum execution time: 43_195_000 picoseconds. + Weight::from_parts(41_864_855, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 48_222_000 picoseconds. - Weight::from_parts(49_638_000, 0) + // Minimum execution time: 47_747_000 picoseconds. + Weight::from_parts(49_219_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_739_000 picoseconds. - Weight::from_parts(12_958_000, 0) + // Minimum execution time: 12_854_000 picoseconds. + Weight::from_parts(12_962_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1062,8 +1063,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 25_663_000 picoseconds. - Weight::from_parts(26_249_000, 3895) + // Minimum execution time: 17_868_000 picoseconds. 
+ Weight::from_parts(18_486_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1073,8 +1074,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 14_726_000 picoseconds. - Weight::from_parts(15_392_000, 3820) + // Minimum execution time: 8_393_000 picoseconds. + Weight::from_parts(8_640_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1084,8 +1085,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 13_779_000 picoseconds. - Weight::from_parts(14_168_000, 3558) + // Minimum execution time: 7_489_000 picoseconds. + Weight::from_parts(7_815_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1093,15 +1094,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 359_000 picoseconds. - Weight::from_parts(402_000, 0) + // Minimum execution time: 299_000 picoseconds. + Weight::from_parts(339_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 339_000 picoseconds. - Weight::from_parts(389_000, 0) + // Minimum execution time: 324_000 picoseconds. + Weight::from_parts(380_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1109,8 +1110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 4_079_000 picoseconds. - Weight::from_parts(4_355_000, 1704) + // Minimum execution time: 2_768_000 picoseconds. 
+ Weight::from_parts(3_025_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1118,10 +1119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 836_000 picoseconds. - Weight::from_parts(591_552, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(r.into())) + // Minimum execution time: 766_000 picoseconds. + Weight::from_parts(722_169, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) } } @@ -1133,8 +1134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_809_000 picoseconds. - Weight::from_parts(2_956_000, 1627) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_986_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1144,10 +1145,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 17_559_000 picoseconds. - Weight::from_parts(17_850_000, 442) - // Standard Error: 2_722 - .saturating_add(Weight::from_parts(1_376_892, 0).saturating_mul(k.into())) + // Minimum execution time: 11_103_000 picoseconds. + Weight::from_parts(11_326_000, 442) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1161,10 +1162,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_830_000 picoseconds. 
- Weight::from_parts(6_649_003, 6149) + // Minimum execution time: 7_783_000 picoseconds. + Weight::from_parts(4_462_075, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_676, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1177,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 21_927_000 picoseconds. - Weight::from_parts(22_655_000, 6450) + // Minimum execution time: 15_971_000 picoseconds. + Weight::from_parts(16_730_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1191,10 +1192,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 4_465_000 picoseconds. - Weight::from_parts(4_774_000, 3635) - // Standard Error: 867 - .saturating_add(Weight::from_parts(1_071_462, 0).saturating_mul(k.into())) + // Minimum execution time: 3_149_000 picoseconds. + Weight::from_parts(3_264_000, 3635) + // Standard Error: 559 + .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1213,10 +1214,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 21_627_000 picoseconds. - Weight::from_parts(21_491_424, 6263) + // Minimum execution time: 15_072_000 picoseconds. 
+ Weight::from_parts(15_721_891, 6263) // Standard Error: 2 - .saturating_add(Weight::from_parts(480, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1227,8 +1228,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 17_262_000 picoseconds. - Weight::from_parts(17_785_000, 6380) + // Minimum execution time: 12_047_000 picoseconds. + Weight::from_parts(12_500_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1237,13 +1238,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 52_303_000 picoseconds. - Weight::from_parts(53_902_000, 6292) + // Minimum execution time: 47_488_000 picoseconds. + Weight::from_parts(48_482_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1255,8 +1256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_585_000 picoseconds. - Weight::from_parts(60_478_000, 6534) + // Minimum execution time: 52_801_000 picoseconds. 
+ Weight::from_parts(54_230_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1266,8 +1267,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 16_673_000 picoseconds. - Weight::from_parts(17_325_000, 6349) + // Minimum execution time: 11_618_000 picoseconds. + Weight::from_parts(12_068_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1277,8 +1278,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 3_073_000 picoseconds. - Weight::from_parts(3_262_000, 1627) + // Minimum execution time: 2_131_000 picoseconds. + Weight::from_parts(2_255_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1290,8 +1291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_687_000 picoseconds. - Weight::from_parts(12_178_000, 3631) + // Minimum execution time: 10_773_000 picoseconds. + Weight::from_parts(11_118_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1301,8 +1302,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_553_000 picoseconds. - Weight::from_parts(4_826_000, 3607) + // Minimum execution time: 4_371_000 picoseconds. + Weight::from_parts(4_624_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1313,8 +1314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_794_000 picoseconds. 
- Weight::from_parts(6_959_000, 3632) + // Minimum execution time: 5_612_000 picoseconds. + Weight::from_parts(5_838_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1325,8 +1326,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_120_000 picoseconds. - Weight::from_parts(6_420_000, 3607) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_693_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1346,11 +1347,11 @@ impl WeightInfo for () { fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` - // Estimated: `4268 + c * (1 ±0)` - // Minimum execution time: 266_424_000 picoseconds. - Weight::from_parts(283_325_502, 4268) - // Standard Error: 12 - .saturating_add(Weight::from_parts(950, 0).saturating_mul(c.into())) + // Estimated: `4266 + c * (1 ±0)` + // Minimum execution time: 247_545_000 picoseconds. 
+ Weight::from_parts(268_016_699, 4266) + // Standard Error: 4 + .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1360,7 +1361,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1377,15 +1378,15 @@ impl WeightInfo for () { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `323` - // Estimated: `6267` - // Minimum execution time: 4_371_315_000 picoseconds. - Weight::from_parts(4_739_462_000, 6267) - // Standard Error: 329 - .saturating_add(Weight::from_parts(38_518, 0).saturating_mul(c.into())) - // Standard Error: 39 - .saturating_add(Weight::from_parts(605, 0).saturating_mul(i.into())) - // Standard Error: 39 - .saturating_add(Weight::from_parts(561, 0).saturating_mul(s.into())) + // Estimated: `6262` + // Minimum execution time: 4_396_772_000 picoseconds. 
+ Weight::from_parts(235_107_907, 6262) + // Standard Error: 185 + .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1404,19 +1405,19 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4016` - // Minimum execution time: 2_304_531_000 picoseconds. - Weight::from_parts(2_352_810_000, 4016) - // Standard Error: 35 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(i.into())) - // Standard Error: 35 - .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) + // Estimated: `4017` + // Minimum execution time: 2_240_868_000 picoseconds. + Weight::from_parts(2_273_668_000, 4017) + // Standard Error: 32 + .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) + // Standard Error: 32 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1436,8 +1437,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 183_658_000 picoseconds. 
- Weight::from_parts(189_507_000, 4291) + // Minimum execution time: 165_067_000 picoseconds. + Weight::from_parts(168_582_000, 4291) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1446,7 +1447,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1454,10 +1455,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 253_006_000 picoseconds. - Weight::from_parts(269_271_744, 3607) - // Standard Error: 79 - .saturating_add(Weight::from_parts(49_970, 0).saturating_mul(c.into())) + // Minimum execution time: 229_454_000 picoseconds. 
+ Weight::from_parts(251_495_551, 3607) + // Standard Error: 71 + .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1466,7 +1467,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1474,10 +1475,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 247_567_000 picoseconds. - Weight::from_parts(271_875_922, 3607) - // Standard Error: 78 - .saturating_add(Weight::from_parts(50_117, 0).saturating_mul(c.into())) + // Minimum execution time: 240_390_000 picoseconds. 
+ Weight::from_parts(273_854_266, 3607) + // Standard Error: 243 + .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1486,15 +1487,15 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 48_151_000 picoseconds. - Weight::from_parts(49_407_000, 3780) + // Minimum execution time: 39_374_000 picoseconds. + Weight::from_parts(40_247_000, 3780) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1508,8 +1509,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 30_173_000 picoseconds. - Weight::from_parts(30_941_000, 6492) + // Minimum execution time: 24_473_000 picoseconds. + Weight::from_parts(25_890_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1518,17 +1519,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_350_000 picoseconds. 
- Weight::from_parts(9_238_867, 0) - // Standard Error: 139 - .saturating_add(Weight::from_parts(52_355, 0).saturating_mul(r.into())) + // Minimum execution time: 8_528_000 picoseconds. + Weight::from_parts(9_301_010, 0) + // Standard Error: 98 + .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 757_000 picoseconds. - Weight::from_parts(827_000, 0) + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(678_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1536,8 +1537,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 12_202_000 picoseconds. - Weight::from_parts(12_708_000, 3819) + // Minimum execution time: 6_107_000 picoseconds. + Weight::from_parts(6_235_000, 3819) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -1546,106 +1547,109 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 13_492_000 picoseconds. - Weight::from_parts(13_845_000, 3912) + // Minimum execution time: 7_316_000 picoseconds. + Weight::from_parts(7_653_000, 3912) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798_000 picoseconds. - Weight::from_parts(856_000, 0) + // Minimum execution time: 721_000 picoseconds. + Weight::from_parts(764_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 364_000 picoseconds. 
- Weight::from_parts(414_000, 0) + // Minimum execution time: 369_000 picoseconds. + Weight::from_parts(417_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 355_000 picoseconds. - Weight::from_parts(396_000, 0) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(349_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(719_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(628_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 770_000 picoseconds. - Weight::from_parts(827_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 5_839_000 picoseconds. - Weight::from_parts(6_174_000, 0) + // Minimum execution time: 4_361_000 picoseconds. + Weight::from_parts(4_577_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 681_000 picoseconds. - Weight::from_parts(757_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(603_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(610_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 654_000 picoseconds. - Weight::from_parts(713_000, 0) + // Minimum execution time: 557_000 picoseconds. 
+ Weight::from_parts(583_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 707_000 picoseconds. - Weight::from_parts(752_000, 0) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(602_000, 0) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_562_000 picoseconds. - Weight::from_parts(1_749_000, 0) + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 4_065_000 picoseconds. + Weight::from_parts(4_291_000, 1552) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 483_000 picoseconds. - Weight::from_parts(536_000, 0) - // Standard Error: 4 - .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) + // Minimum execution time: 487_000 picoseconds. + Weight::from_parts(517_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 372_000 picoseconds. - Weight::from_parts(384_000, 0) - // Standard Error: 11 - .saturating_add(Weight::from_parts(433, 0).saturating_mul(n.into())) + // Minimum execution time: 318_000 picoseconds. 
+ Weight::from_parts(372_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1658,10 +1662,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_544_934, 3784) - // Standard Error: 9_422 - .saturating_add(Weight::from_parts(4_431_910, 0).saturating_mul(n.into())) + // Minimum execution time: 13_251_000 picoseconds. + Weight::from_parts(15_257_892, 3784) + // Standard Error: 7_089 + .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1674,8 +1678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 4_503_000 picoseconds. - Weight::from_parts(4_743_000, 1561) + // Minimum execution time: 3_434_000 picoseconds. + Weight::from_parts(3_605_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -1686,12 +1690,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_838_000 picoseconds. - Weight::from_parts(4_110_930, 990) - // Standard Error: 6_782 - .saturating_add(Weight::from_parts(2_241_357, 0).saturating_mul(t.into())) + // Minimum execution time: 3_668_000 picoseconds. 
+ Weight::from_parts(3_999_591, 990) + // Standard Error: 5_767 + .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -1701,10 +1705,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 506_000 picoseconds. - Weight::from_parts(526_000, 0) - // Standard Error: 11 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(i.into())) + // Minimum execution time: 443_000 picoseconds. + Weight::from_parts(472_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1712,8 +1716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 16_531_000 picoseconds. - Weight::from_parts(16_947_000, 16618) + // Minimum execution time: 13_752_000 picoseconds. + Weight::from_parts(14_356_000, 16618) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1722,8 +1726,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 57_673_000 picoseconds. - Weight::from_parts(63_131_000, 26628) + // Minimum execution time: 43_444_000 picoseconds. 
+ Weight::from_parts(45_087_000, 26628) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1732,8 +1736,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 18_388_000 picoseconds. - Weight::from_parts(18_882_000, 16618) + // Minimum execution time: 15_616_000 picoseconds. + Weight::from_parts(16_010_000, 16618) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1743,8 +1747,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 62_048_000 picoseconds. - Weight::from_parts(71_685_000, 26628) + // Minimum execution time: 47_020_000 picoseconds. + Weight::from_parts(50_152_000, 26628) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1756,12 +1760,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 11_886_000 picoseconds. - Weight::from_parts(11_100_121, 249) - // Standard Error: 2 - .saturating_add(Weight::from_parts(258, 0).saturating_mul(n.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(91, 0).saturating_mul(o.into())) + // Minimum execution time: 8_824_000 picoseconds. 
+ Weight::from_parts(8_915_233, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1773,10 +1777,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 9_576_000 picoseconds. - Weight::from_parts(10_418_109, 248) + // Minimum execution time: 7_133_000 picoseconds. + Weight::from_parts(7_912_778, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(115, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1788,10 +1792,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 8_903_000 picoseconds. - Weight::from_parts(10_108_260, 248) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_647_236, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(626, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1802,10 +1806,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 8_216_000 picoseconds. - Weight::from_parts(9_267_036, 248) + // Minimum execution time: 6_247_000 picoseconds. 
+ Weight::from_parts(6_952_661, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(103, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1816,10 +1820,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 9_713_000 picoseconds. - Weight::from_parts(10_998_797, 248) + // Minimum execution time: 7_428_000 picoseconds. + Weight::from_parts(8_384_015, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(639, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1828,36 +1832,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_521_000 picoseconds. - Weight::from_parts(1_612_000, 0) + // Minimum execution time: 1_478_000 picoseconds. + Weight::from_parts(1_533_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_866_000 picoseconds. - Weight::from_parts(3_150_000, 0) + // Minimum execution time: 2_485_000 picoseconds. + Weight::from_parts(2_728_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_200_000 picoseconds. - Weight::from_parts(3_373_000, 0) + // Minimum execution time: 3_195_000 picoseconds. 
+ Weight::from_parts(3_811_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_138_000 picoseconds. - Weight::from_parts(4_488_000, 0) + // Minimum execution time: 3_902_000 picoseconds. + Weight::from_parts(4_118_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_594_000 picoseconds. - Weight::from_parts(1_799_000, 0) + // Minimum execution time: 1_571_000 picoseconds. + Weight::from_parts(1_662_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -1865,57 +1869,57 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_811_000 picoseconds. - Weight::from_parts(2_851_992, 0) + // Minimum execution time: 5_250_000 picoseconds. + Weight::from_parts(2_465_568, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(222, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_335_000 picoseconds. - Weight::from_parts(2_661_318, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(234, 0).saturating_mul(n.into())) + // Minimum execution time: 2_012_000 picoseconds. + Weight::from_parts(2_288_004, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. 
fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_189_000 picoseconds. - Weight::from_parts(2_487_605, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(220, 0).saturating_mul(n.into())) + // Minimum execution time: 1_906_000 picoseconds. + Weight::from_parts(2_121_040, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_831_000 picoseconds. - Weight::from_parts(2_071_548, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(134, 0).saturating_mul(n.into())) + // Minimum execution time: 1_736_000 picoseconds. + Weight::from_parts(1_954_728, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_106_000 picoseconds. - Weight::from_parts(8_556_699, 0) + // Minimum execution time: 7_872_000 picoseconds. + Weight::from_parts(8_125_644, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 10_433_000 picoseconds. - Weight::from_parts(10_873_000, 0) + // Minimum execution time: 8_489_000 picoseconds. 
+ Weight::from_parts(8_791_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1931,12 +1935,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 140_018_000 picoseconds. - Weight::from_parts(142_816_362, 4085) - // Standard Error: 187_348 - .saturating_add(Weight::from_parts(42_978_763, 0).saturating_mul(t.into())) + // Minimum execution time: 122_759_000 picoseconds. + Weight::from_parts(120_016_020, 4085) + // Standard Error: 173_118 + .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -1951,8 +1955,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 130_708_000 picoseconds. - Weight::from_parts(134_865_000, 3895) + // Minimum execution time: 111_566_000 picoseconds. + Weight::from_parts(115_083_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -1971,12 +1975,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_891_181_000 picoseconds. - Weight::from_parts(1_901_270_000, 4132) - // Standard Error: 26 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(i.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(s.into())) + // Minimum execution time: 1_871_402_000 picoseconds. 
+ Weight::from_parts(1_890_038_000, 4132) + // Standard Error: 24 + .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1985,64 +1989,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(12_708_667, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(9_599_151, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(12_527_035, 0) + // Minimum execution time: 1_416_000 picoseconds. + Weight::from_parts(10_964_255, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_526, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 787_000 picoseconds. - Weight::from_parts(8_175_079, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(6_579_283, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_460, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 807_000 picoseconds. - Weight::from_parts(6_418_831, 0) + // Minimum execution time: 773_000 picoseconds. + Weight::from_parts(10_990_209, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 49_651_000 picoseconds. - Weight::from_parts(48_834_618, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_221, 0).saturating_mul(n.into())) + // Minimum execution time: 43_195_000 picoseconds. + Weight::from_parts(41_864_855, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 48_222_000 picoseconds. - Weight::from_parts(49_638_000, 0) + // Minimum execution time: 47_747_000 picoseconds. + Weight::from_parts(49_219_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_739_000 picoseconds. - Weight::from_parts(12_958_000, 0) + // Minimum execution time: 12_854_000 picoseconds. + Weight::from_parts(12_962_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -2052,8 +2056,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 25_663_000 picoseconds. - Weight::from_parts(26_249_000, 3895) + // Minimum execution time: 17_868_000 picoseconds. 
+ Weight::from_parts(18_486_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2063,8 +2067,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 14_726_000 picoseconds. - Weight::from_parts(15_392_000, 3820) + // Minimum execution time: 8_393_000 picoseconds. + Weight::from_parts(8_640_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2074,8 +2078,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 13_779_000 picoseconds. - Weight::from_parts(14_168_000, 3558) + // Minimum execution time: 7_489_000 picoseconds. + Weight::from_parts(7_815_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2083,15 +2087,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 359_000 picoseconds. - Weight::from_parts(402_000, 0) + // Minimum execution time: 299_000 picoseconds. + Weight::from_parts(339_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 339_000 picoseconds. - Weight::from_parts(389_000, 0) + // Minimum execution time: 324_000 picoseconds. + Weight::from_parts(380_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -2099,8 +2103,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 4_079_000 picoseconds. - Weight::from_parts(4_355_000, 1704) + // Minimum execution time: 2_768_000 picoseconds. 
+ Weight::from_parts(3_025_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -2108,9 +2112,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 836_000 picoseconds. - Weight::from_parts(591_552, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(r.into())) + // Minimum execution time: 766_000 picoseconds. + Weight::from_parts(722_169, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 8297c35b31db..09c70c287899 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -12,16 +12,16 @@ description = "Exposes all the host functions that a contract can import." workspace = true [dependencies] +paste = { workspace = true } bitflags = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } codec = { features = [ "derive", "max-encoded-len", ], optional = true, workspace = true } -paste = { workspace = true } -scale-info = { features = ["derive"], optional = true, workspace = true } [package.metadata.docs.rs] -targets = ["wasm32-unknown-unknown"] +default-target = ["wasm32-unknown-unknown"] [features] default = ["scale"] diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 2d23f493ea01..fdb4310610d9 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -21,11 +21,11 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = 
true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs index 31bd6b85ec86..85da1aed3c27 100644 --- a/substrate/frame/conviction-voting/src/lib.rs +++ b/substrate/frame/conviction-voting/src/lib.rs @@ -171,12 +171,10 @@ pub mod pallet { Delegated(T::AccountId, T::AccountId), /// An \[account\] has cancelled a previous delegation operation. Undelegated(T::AccountId), - /// An account has voted + /// An account that has voted Voted { who: T::AccountId, vote: AccountVote<BalanceOf<T>> }, - /// A vote has been removed + /// A vote that has been removed VoteRemoved { who: T::AccountId, vote: AccountVote<BalanceOf<T>> }, - /// The lockup period of a conviction vote expired, and the funds have been unlocked. 
- VoteUnlocked { who: T::AccountId, class: ClassOf }, } #[pallet::error] @@ -317,7 +315,6 @@ pub mod pallet { ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; Self::update_lock(&class, &target); - Self::deposit_event(Event::VoteUnlocked { who: target, class }); Ok(()) } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index dd9ee33ee183..37cdd7a5b338 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -238,52 +238,27 @@ fn basic_stuff() { fn basic_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: aye(2, 5), - })); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: nay(2, 5), - })); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: aye(5, 1), - })); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: aye(10, 0), - })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); - 
System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { - who: 1, - vote: nay(10, 0), - })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { - who: 1, - class: class(3), - })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -292,32 +267,15 @@ fn basic_voting_works() { fn split_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(10, 0))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: split(10, 0), - })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(5, 5))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: split(5, 5), - })); assert_eq!(tally(3), Tally::from_parts(0, 0, 5)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { - who: 1, - vote: split(5, 5), - })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { - who: 1, - class: class(3), - })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -326,48 +284,25 @@ fn split_voting_works() { fn abstain_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split_abstain(0, 0, 10))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 1, - vote: split_abstain(0, 0, 10), - })); assert_eq!(tally(3), Tally::from_parts(0, 0, 10)); - - assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(10, 0, 20))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 6, - 
vote: split_abstain(10, 0, 20), - })); - assert_eq!(tally(3), Tally::from_parts(1, 0, 40)); - - assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(0, 0, 40))); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { - who: 6, - vote: split_abstain(0, 0, 40), - })); - - assert_eq!(tally(3), Tally::from_parts(0, 0, 50)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(0, 0, 20))); + assert_eq!(tally(3), Tally::from_parts(0, 0, 30)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(10, 0, 10))); + assert_eq!(tally(3), Tally::from_parts(1, 0, 30)); assert_eq!(Balances::usable_balance(1), 0); - assert_eq!(Balances::usable_balance(6), 20); + assert_eq!(Balances::usable_balance(2), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { - who: 1, - vote: split_abstain(0, 0, 10), - })); - assert_eq!(tally(3), Tally::from_parts(0, 0, 40)); - - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(6), Some(class(3)), 3)); - System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { - who: 6, - vote: split_abstain(0, 0, 40), - })); + assert_eq!(tally(3), Tally::from_parts(1, 0, 20)); + + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), None, 3)); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(6), class(3), 6)); - assert_eq!(Balances::usable_balance(6), 60); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), class(3), 2)); + assert_eq!(Balances::usable_balance(2), 20); }); } diff --git a/substrate/frame/conviction-voting/src/types.rs b/substrate/frame/conviction-voting/src/types.rs index aa7dd578fbad..d6bbb678a14b 100644 --- a/substrate/frame/conviction-voting/src/types.rs +++ b/substrate/frame/conviction-voting/src/types.rs @@ 
-117,9 +117,14 @@ impl< pub fn from_parts( ayes_with_conviction: Votes, nays_with_conviction: Votes, - support: Votes, + ayes: Votes, ) -> Self { - Self { ayes: ayes_with_conviction, nays: nays_with_conviction, support, dummy: PhantomData } + Self { + ayes: ayes_with_conviction, + nays: nays_with_conviction, + support: ayes, + dummy: PhantomData, + } } /// Add an account's vote into the tally. diff --git a/substrate/frame/conviction-voting/src/weights.rs b/substrate/frame/conviction-voting/src/weights.rs index 1abcd83e7d5c..d8f3ffcb3be6 100644 --- a/substrate/frame/conviction-voting/src/weights.rs +++ b/substrate/frame/conviction-voting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_conviction_voting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -81,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 135_295_000 picoseconds. - Weight::from_parts(142_897_000, 219984) + // Minimum execution time: 114_422_000 picoseconds. + Weight::from_parts(118_642_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -104,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 324_485_000 picoseconds. 
- Weight::from_parts(337_467_000, 219984) + // Minimum execution time: 290_934_000 picoseconds. + Weight::from_parts(303_286_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -121,8 +121,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 302_574_000 picoseconds. - Weight::from_parts(315_016_000, 219984) + // Minimum execution time: 277_464_000 picoseconds. + Weight::from_parts(284_288_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -134,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 65_548_000 picoseconds. - Weight::from_parts(71_499_000, 30706) + // Minimum execution time: 54_538_000 picoseconds. + Weight::from_parts(55_758_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,10 +158,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 61_383_000 picoseconds. - Weight::from_parts(70_695_789, 109992) - // Standard Error: 457_836 - .saturating_add(Weight::from_parts(44_163_910, 0).saturating_mul(r.into())) + // Minimum execution time: 47_243_000 picoseconds. 
+ Weight::from_parts(50_023_534, 109992) + // Standard Error: 228_993 + .saturating_add(Weight::from_parts(43_173_465, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -181,10 +181,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 33_466_000 picoseconds. - Weight::from_parts(39_261_420, 109992) - // Standard Error: 358_545 - .saturating_add(Weight::from_parts(43_197_579, 0).saturating_mul(r.into())) + // Minimum execution time: 23_529_000 picoseconds. + Weight::from_parts(25_071_526, 109992) + // Standard Error: 138_190 + .saturating_add(Weight::from_parts(40_350_973, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -203,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 87_030_000 picoseconds. - Weight::from_parts(91_851_000, 30706) + // Minimum execution time: 69_473_000 picoseconds. + Weight::from_parts(71_519_000, 30706) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -230,8 +230,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 135_295_000 picoseconds. - Weight::from_parts(142_897_000, 219984) + // Minimum execution time: 114_422_000 picoseconds. 
+ Weight::from_parts(118_642_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 324_485_000 picoseconds. - Weight::from_parts(337_467_000, 219984) + // Minimum execution time: 290_934_000 picoseconds. + Weight::from_parts(303_286_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -270,8 +270,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 302_574_000 picoseconds. - Weight::from_parts(315_016_000, 219984) + // Minimum execution time: 277_464_000 picoseconds. + Weight::from_parts(284_288_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -283,8 +283,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 65_548_000 picoseconds. - Weight::from_parts(71_499_000, 30706) + // Minimum execution time: 54_538_000 picoseconds. + Weight::from_parts(55_758_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,10 +307,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 61_383_000 picoseconds. - Weight::from_parts(70_695_789, 109992) - // Standard Error: 457_836 - .saturating_add(Weight::from_parts(44_163_910, 0).saturating_mul(r.into())) + // Minimum execution time: 47_243_000 picoseconds. 
+ Weight::from_parts(50_023_534, 109992) + // Standard Error: 228_993 + .saturating_add(Weight::from_parts(43_173_465, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -330,10 +330,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 33_466_000 picoseconds. - Weight::from_parts(39_261_420, 109992) - // Standard Error: 358_545 - .saturating_add(Weight::from_parts(43_197_579, 0).saturating_mul(r.into())) + // Minimum execution time: 23_529_000 picoseconds. + Weight::from_parts(25_071_526, 109992) + // Standard Error: 138_190 + .saturating_add(Weight::from_parts(40_350_973, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -352,8 +352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 87_030_000 picoseconds. - Weight::from_parts(91_851_000, 30706) + // Minimum execution time: 69_473_000 picoseconds. 
+ Weight::from_parts(71_519_000, 30706) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index c0017f477251..3d73ec58d613 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -pallet-ranked-collective = { optional = true, workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index 7a48ed9783e7..bcf70c7beb10 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -21,15 +21,15 @@ use frame_support::{ assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types, pallet_prelude::Weight, parameter_types, - traits::{ConstU16, EitherOf, IsInVec, MapSuccess, NoOpPoll, TryMapSuccess}, + traits::{ConstU16, EitherOf, IsInVec, MapSuccess, PollStatus, Polling, TryMapSuccess}, }; use frame_system::EnsureSignedBy; -use pallet_ranked_collective::{EnsureRanked, Geometric, Rank}; +use pallet_ranked_collective::{EnsureRanked, Geometric, Rank, TallyOf, Votes}; use sp_core::{ConstU32, Get}; use sp_runtime::{ bounded_vec, traits::{Convert, ReduceBy, 
ReplaceWithDefault, TryMorphInto}, - BuildStorage, + BuildStorage, DispatchError, }; type Class = Rank; @@ -83,6 +83,45 @@ impl Config for Test { type MaxRank = ConstU32<9>; } +pub struct TestPolls; +impl Polling> for TestPolls { + type Index = u8; + type Votes = Votes; + type Moment = u64; + type Class = Class; + + fn classes() -> Vec { + unimplemented!() + } + fn as_ongoing(_: u8) -> Option<(TallyOf, Self::Class)> { + unimplemented!() + } + fn access_poll( + _: Self::Index, + _: impl FnOnce(PollStatus<&mut TallyOf, Self::Moment, Self::Class>) -> R, + ) -> R { + unimplemented!() + } + fn try_access_poll( + _: Self::Index, + _: impl FnOnce( + PollStatus<&mut TallyOf, Self::Moment, Self::Class>, + ) -> Result, + ) -> Result { + unimplemented!() + } + + #[cfg(feature = "runtime-benchmarks")] + fn create_ongoing(_: Self::Class) -> Result { + unimplemented!() + } + + #[cfg(feature = "runtime-benchmarks")] + fn end_ongoing(_: Self::Index, _: bool) -> Result<(), ()> { + unimplemented!() + } +} + /// Convert the tally class into the minimum rank required to vote on the poll. /// MinRank(Class) = Class - Delta pub struct MinRankOfClass(PhantomData); @@ -115,7 +154,7 @@ impl pallet_ranked_collective::Config for Test { // Members can exchange up to the rank of 2 below them. MapSuccess, ReduceBy>>, >; - type Polls = NoOpPoll; + type Polls = TestPolls; type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = CoreFellowship; type VoteWeight = Geometric; diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 9bca8cb56094..5e64600b662b 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-06-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_core_fellowship -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/core-fellowship/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_core_fellowship +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/core-fellowship/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -74,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_652_000 picoseconds. - Weight::from_parts(7_082_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Params` (r:1 w:1) @@ -84,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `1853` - // Minimum execution time: 12_485_000 picoseconds. - Weight::from_parts(12_784_000, 1853) + // Minimum execution time: 10_050_000 picoseconds. 
+ Weight::from_parts(10_244_000, 1853) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 61_243_000 picoseconds. - Weight::from_parts(63_033_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. + Weight::from_parts(55_650_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -130,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 65_063_000 picoseconds. - Weight::from_parts(67_047_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -143,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 21_924_000 picoseconds. - Weight::from_parts(22_691_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -162,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_720_000 picoseconds. - Weight::from_parts(25_580_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -185,8 +183,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 58_481_000 picoseconds. 
- Weight::from_parts(59_510_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. + Weight::from_parts(52_810_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -207,10 +205,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16844` // Estimated: `19894 + r * (2489 ±0)` - // Minimum execution time: 53_570_000 picoseconds. - Weight::from_parts(42_220_685, 19894) - // Standard Error: 18_061 - .saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into())) + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -227,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_492_000 picoseconds. - Weight::from_parts(18_324_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -240,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_534_000 picoseconds. - Weight::from_parts(17_046_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -255,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 42_264_000 picoseconds. 
- Weight::from_parts(43_281_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. + Weight::from_parts(35_162_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -268,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_461_000 picoseconds. - Weight::from_parts(26_014_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -283,8 +281,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_652_000 picoseconds. - Weight::from_parts(7_082_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Params` (r:1 w:1) @@ -293,8 +291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `1853` - // Minimum execution time: 12_485_000 picoseconds. - Weight::from_parts(12_784_000, 1853) + // Minimum execution time: 10_050_000 picoseconds. + Weight::from_parts(10_244_000, 1853) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -316,8 +314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 61_243_000 picoseconds. - Weight::from_parts(63_033_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. 
+ Weight::from_parts(55_650_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -339,8 +337,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 65_063_000 picoseconds. - Weight::from_parts(67_047_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -352,8 +350,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 21_924_000 picoseconds. - Weight::from_parts(22_691_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -371,8 +369,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_720_000 picoseconds. - Weight::from_parts(25_580_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -394,8 +392,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 58_481_000 picoseconds. - Weight::from_parts(59_510_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. + Weight::from_parts(52_810_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -416,10 +414,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16844` // Estimated: `19894 + r * (2489 ±0)` - // Minimum execution time: 53_570_000 picoseconds. 
- Weight::from_parts(42_220_685, 19894) - // Standard Error: 18_061 - .saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into())) + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -436,8 +434,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_492_000 picoseconds. - Weight::from_parts(18_324_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -449,8 +447,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_534_000 picoseconds. - Weight::from_parts(17_046_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -464,8 +462,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 42_264_000 picoseconds. - Weight::from_parts(43_281_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. + Weight::from_parts(35_162_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -477,8 +475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_461_000 picoseconds. 
- Weight::from_parts(26_014_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml index 576276dced52..8d5ccd342b6b 100644 --- a/substrate/frame/delegated-staking/Cargo.toml +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -15,23 +15,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } +sp-io = { workspace = true } +log = { workspace = true } [dev-dependencies] -frame-election-provider-support = { workspace = true } -pallet-balances = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = 
true } [features] default = ["std"] diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 189d64ccaa74..3cfea8bb3129 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -19,20 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } +log = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-preimage = { workspace = true, default-features = true } pallet-scheduler = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index f9c810e56192..ee36e9212f52 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -17,11 +17,9 @@ //! Democracy pallet benchmarking. 
-#![cfg(feature = "runtime-benchmarks")] - use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{account, benchmarks, whitelist_account, BenchmarkError}; use frame_support::{ assert_noop, assert_ok, traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, @@ -96,15 +94,11 @@ fn note_preimage() -> T::Hash { hash } -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn propose() -> Result<(), BenchmarkError> { +benchmarks! { + propose { let p = T::MaxProposals::get(); - for i in 0..(p - 1) { + for i in 0 .. (p - 1) { add_proposal::(i)?; } @@ -112,22 +106,18 @@ mod benchmarks { let proposal = make_proposal::(0); let value = T::MinimumDeposit::get(); whitelist_account!(caller); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), proposal, value); - + }: _(RawOrigin::Signed(caller), proposal, value) + verify { assert_eq!(PublicProps::::get().len(), p as usize, "Proposals not created."); - Ok(()) } - #[benchmark] - fn second() -> Result<(), BenchmarkError> { + second { let caller = funded_account::("caller", 0); add_proposal::(0)?; // Create s existing "seconds" // we must reserve one deposit for the `proposal` and one for our benchmarked `second` call. - for i in 0..T::MaxDeposits::get() - 2 { + for i in 0 .. 
T::MaxDeposits::get() - 2 { let seconder = funded_account::("seconder", i); Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; } @@ -135,32 +125,20 @@ mod benchmarks { let deposits = DepositOf::::get(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), 0); - + }: _(RawOrigin::Signed(caller), 0) + verify { let deposits = DepositOf::::get(0).ok_or("Proposal not created")?; - assert_eq!( - deposits.0.len(), - (T::MaxDeposits::get()) as usize, - "`second` benchmark did not work" - ); - Ok(()) + assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); } - #[benchmark] - fn vote_new() -> Result<(), BenchmarkError> { + vote_new { let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0..T::MaxVotes::get() - 1 { + for i in 0 .. T::MaxVotes::get() - 1 { let ref_index = add_referendum::(i).0; - Democracy::::vote( - RawOrigin::Signed(caller.clone()).into(), - ref_index, - account_vote, - )?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -170,32 +148,23 @@ mod benchmarks { let ref_index = add_referendum::(T::MaxVotes::get() - 1).0; whitelist_account!(caller); - - #[extrinsic_call] - vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote); - + }: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote) + verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded."); - Ok(()) } - #[benchmark] - fn vote_existing() -> Result<(), BenchmarkError> { + vote_existing { let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes for i in 0..T::MaxVotes::get() { let ref_index = add_referendum::(i).0; - Democracy::::vote( - RawOrigin::Signed(caller.clone()).into(), - ref_index, - account_vote, - )?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -210,50 +179,43 @@ mod benchmarks { // This tests when a user changes a vote whitelist_account!(caller); - - #[extrinsic_call] - vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote); - + }: vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote) + verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); - let referendum_info = - ReferendumInfoOf::::get(ref_index).ok_or("referendum doesn't exist")?; - let tally = match referendum_info { + let referendum_info = ReferendumInfoOf::::get(ref_index) + .ok_or("referendum doesn't exist")?; + let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, _ => return Err("referendum not ongoing".into()), }; assert_eq!(tally.nays, 1000u32.into(), "changed vote was not recorded"); - Ok(()) } - #[benchmark] - fn emergency_cancel() -> Result<(), BenchmarkError> { - let origin = T::CancellationOrigin::try_successful_origin() - .map_err(|_| BenchmarkError::Weightless)?; + emergency_cancel { + let origin = + T::CancellationOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let (ref_index, _, preimage_hash) = add_referendum::(0); assert_ok!(Democracy::::referendum_status(ref_index)); - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, ref_index); + }: _(origin, ref_index) + verify { // Referendum has been canceled - assert_noop!(Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid,); - assert_last_event::( - crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(ref_index), - hash: preimage_hash, - } - .into(), + assert_noop!( + Democracy::::referendum_status(ref_index), + Error::::ReferendumInvalid, ); - Ok(()) + assert_last_event::(crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(ref_index), + hash: preimage_hash, + }.into()); } - #[benchmark] - fn blacklist() -> Result<(), BenchmarkError> { + blacklist { // Place our proposal at the end to make sure it's worst case. - for i in 0..T::MaxProposals::get() - 1 { + for i in 0 .. T::MaxProposals::get() - 1 { add_proposal::(i)?; } // We should really add a lot of seconds here, but we're not doing it elsewhere. 
@@ -269,24 +231,21 @@ mod benchmarks { )); let origin = T::BlacklistOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - #[extrinsic_call] - _(origin as T::RuntimeOrigin, hash, Some(ref_index)); - + }: _(origin, hash, Some(ref_index)) + verify { // Referendum has been canceled - assert_noop!(Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid); - assert_has_event::( - crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(ref_index), - hash: preimage_hash, - } - .into(), + assert_noop!( + Democracy::::referendum_status(ref_index), + Error::::ReferendumInvalid ); - Ok(()) + assert_has_event::(crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(ref_index), + hash: preimage_hash, + }.into()); } // Worst case scenario, we external propose a previously blacklisted proposal - #[benchmark] - fn external_propose() -> Result<(), BenchmarkError> { + external_propose { let origin = T::ExternalOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); @@ -299,42 +258,33 @@ mod benchmarks { .try_into() .unwrap(); Blacklist::::insert(proposal.hash(), (BlockNumberFor::::zero(), addresses)); - #[extrinsic_call] - _(origin as T::RuntimeOrigin, proposal); - + }: _(origin, proposal) + verify { // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); - Ok(()) } - #[benchmark] - fn external_propose_majority() -> Result<(), BenchmarkError> { + external_propose_majority { let origin = T::ExternalMajorityOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); - #[extrinsic_call] - _(origin as T::RuntimeOrigin, proposal); - + }: _(origin, proposal) + verify { // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); - Ok(()) } - #[benchmark] - fn external_propose_default() -> Result<(), BenchmarkError> { + external_propose_default { let origin 
= T::ExternalDefaultOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); - #[extrinsic_call] - _(origin as T::RuntimeOrigin, proposal); - + }: _(origin, proposal) + verify { // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); - Ok(()) } - #[benchmark] - fn fast_track() -> Result<(), BenchmarkError> { + fast_track { let origin_propose = T::ExternalDefaultOrigin::try_successful_origin() .expect("ExternalDefaultOrigin has no successful origin required for the benchmark"); let proposal = make_proposal::(0); @@ -345,30 +295,23 @@ mod benchmarks { assert_ok!(Democracy::::set_metadata( origin_propose, MetadataOwner::External, - Some(preimage_hash) - )); + Some(preimage_hash))); // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. let origin_fast_track = T::FastTrackOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - #[extrinsic_call] - _(origin_fast_track as T::RuntimeOrigin, proposal_hash, voting_period, delay.into()); - + }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) + verify { assert_eq!(ReferendumCount::::get(), 1, "referendum not created"); - assert_last_event::( - crate::Event::MetadataTransferred { - prev_owner: MetadataOwner::External, - owner: MetadataOwner::Referendum(0), - hash: preimage_hash, - } - .into(), - ); - Ok(()) + assert_last_event::(crate::Event::MetadataTransferred { + prev_owner: MetadataOwner::External, + owner: MetadataOwner::Referendum(0), + hash: preimage_hash, + }.into()); } - #[benchmark] - fn veto_external() -> Result<(), BenchmarkError> { + veto_external { let proposal = make_proposal::(0); let proposal_hash = proposal.hash(); @@ -380,32 +323,28 @@ mod benchmarks { assert_ok!(Democracy::::set_metadata( origin_propose, MetadataOwner::External, - Some(preimage_hash) - )); + 
Some(preimage_hash)) + ); let mut vetoers: BoundedVec = Default::default(); - for i in 0..(T::MaxBlacklisted::get() - 1) { + for i in 0 .. (T::MaxBlacklisted::get() - 1) { vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); } vetoers.sort(); Blacklist::::insert(proposal_hash, (BlockNumberFor::::zero(), vetoers)); - let origin = - T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin = T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; ensure!(NextExternal::::get().is_some(), "no external proposal"); - #[extrinsic_call] - _(origin as T::RuntimeOrigin, proposal_hash); - + }: _(origin, proposal_hash) + verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = Blacklist::::get(&proposal_hash).ok_or("no blacklist")?; assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); - Ok(()) } - #[benchmark] - fn cancel_proposal() -> Result<(), BenchmarkError> { + cancel_proposal { // Place our proposal at the end to make sure it's worst case. - for i in 0..T::MaxProposals::get() { + for i in 0 .. T::MaxProposals::get() { add_proposal::(i)?; } // Add metadata to the first proposal. 
@@ -414,41 +353,31 @@ mod benchmarks { assert_ok!(Democracy::::set_metadata( RawOrigin::Signed(proposer).into(), MetadataOwner::Proposal(0), - Some(preimage_hash) - )); + Some(preimage_hash))); let cancel_origin = T::CancelProposalOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; - #[extrinsic_call] - _(cancel_origin as T::RuntimeOrigin, 0); - - assert_last_event::( - crate::Event::MetadataCleared { - owner: MetadataOwner::Proposal(0), - hash: preimage_hash, - } - .into(), - ); - Ok(()) + }: _(cancel_origin, 0) + verify { + assert_last_event::(crate::Event::MetadataCleared { + owner: MetadataOwner::Proposal(0), + hash: preimage_hash, + }.into()); } - #[benchmark] - fn cancel_referendum() -> Result<(), BenchmarkError> { + cancel_referendum { let (ref_index, _, preimage_hash) = add_referendum::(0); - #[extrinsic_call] - _(RawOrigin::Root, ref_index); - - assert_last_event::( - crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(0), - hash: preimage_hash, - } - .into(), - ); - Ok(()) + }: _(RawOrigin::Root, ref_index) + verify { + assert_last_event::(crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(0), + hash: preimage_hash, + }.into()); } - #[benchmark(extra)] - fn on_initialize_external(r: Linear<0, REFERENDUM_COUNT_HINT>) -> Result<(), BenchmarkError> { + #[extra] + on_initialize_external { + let r in 0 .. REFERENDUM_COUNT_HINT; + for i in 0..r { add_referendum::(i); } @@ -468,17 +397,14 @@ mod benchmarks { let block_number = T::LaunchPeriod::get(); - #[block] - { - Democracy::::on_initialize(block_number); - } - + }: { Democracy::::on_initialize(block_number) } + verify { // One extra because of next external assert_eq!(ReferendumCount::::get(), r + 1, "referenda not created"); ensure!(!NextExternal::::exists(), "External wasn't taken"); // All but the new next external should be finished - for i in 0..r { + for i in 0 .. 
r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { ReferendumInfo::Finished { .. } => (), @@ -486,13 +412,12 @@ mod benchmarks { } } } - Ok(()) } - #[benchmark(extra)] - fn on_initialize_public( - r: Linear<0, { T::MaxVotes::get() - 1 }>, - ) -> Result<(), BenchmarkError> { + #[extra] + on_initialize_public { + let r in 0 .. (T::MaxVotes::get() - 1); + for i in 0..r { add_referendum::(i); } @@ -505,16 +430,13 @@ mod benchmarks { let block_number = T::LaunchPeriod::get(); - #[block] - { - Democracy::::on_initialize(block_number); - } - + }: { Democracy::::on_initialize(block_number) } + verify { // One extra because of next public assert_eq!(ReferendumCount::::get(), r + 1, "proposal not accepted"); // All should be finished - for i in 0..r { + for i in 0 .. r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { ReferendumInfo::Finished { .. } => (), @@ -522,12 +444,12 @@ mod benchmarks { } } } - Ok(()) } // No launch no maturing referenda. - #[benchmark] - fn on_initialize_base(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { + on_initialize_base { + let r in 0 .. (T::MaxVotes::get() - 1); + for i in 0..r { add_referendum::(i); } @@ -542,28 +464,22 @@ mod benchmarks { assert_eq!(ReferendumCount::::get(), r, "referenda not created"); assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); - #[block] - { - Democracy::::on_initialize(1u32.into()); - } - + }: { Democracy::::on_initialize(1u32.into()) } + verify { // All should be on going - for i in 0..r { + for i in 0 .. r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { - ReferendumInfo::Finished { .. } => - return Err("Referendum has been finished".into()), + ReferendumInfo::Finished { .. 
} => return Err("Referendum has been finished".into()), ReferendumInfo::Ongoing(_) => (), } } } - Ok(()) } - #[benchmark] - fn on_initialize_base_with_launch_period( - r: Linear<0, { T::MaxVotes::get() - 1 }>, - ) -> Result<(), BenchmarkError> { + on_initialize_base_with_launch_period { + let r in 0 .. (T::MaxVotes::get() - 1); + for i in 0..r { add_referendum::(i); } @@ -580,26 +496,22 @@ mod benchmarks { let block_number = T::LaunchPeriod::get(); - #[block] - { - Democracy::::on_initialize(block_number); - } - + }: { Democracy::::on_initialize(block_number) } + verify { // All should be on going - for i in 0..r { + for i in 0 .. r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { - ReferendumInfo::Finished { .. } => - return Err("Referendum has been finished".into()), + ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), ReferendumInfo::Ongoing(_) => (), } } } - Ok(()) } - #[benchmark] - fn delegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { + delegate { + let r in 0 .. (T::MaxVotes::get() - 1); + let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -626,11 +538,7 @@ mod benchmarks { // We need to create existing direct votes for the `new_delegate` for i in 0..r { let ref_index = add_referendum::(i).0; - Democracy::::vote( - RawOrigin::Signed(new_delegate.clone()).into(), - ref_index, - account_vote, - )?; + Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. 
} => votes, @@ -638,15 +546,8 @@ mod benchmarks { }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - - #[extrinsic_call] - _( - RawOrigin::Signed(caller.clone()), - new_delegate_lookup, - Conviction::Locked1x, - delegated_balance, - ); - + }: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance) + verify { let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), _ => return Err("Votes are not direct".into()), @@ -658,11 +559,11 @@ mod benchmarks { _ => return Err("Votes are not direct".into()), }; assert_eq!(delegations.capital, delegated_balance, "delegation was not recorded."); - Ok(()) } - #[benchmark] - fn undelegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { + undelegate { + let r in 0 .. (T::MaxVotes::get() - 1); + let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -689,7 +590,7 @@ mod benchmarks { Democracy::::vote( RawOrigin::Signed(the_delegate.clone()).into(), ref_index, - account_vote, + account_vote )?; } let votes = match VotingOf::::get(&the_delegate) { @@ -698,38 +599,31 @@ mod benchmarks { }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone())); - + }: _(RawOrigin::Signed(caller.clone())) + verify { // Voting should now be direct match VotingOf::::get(&caller) { Voting::Direct { .. 
} => (), _ => return Err("undelegation failed".into()), } - Ok(()) } - #[benchmark] - fn clear_public_proposals() -> Result<(), BenchmarkError> { + clear_public_proposals { add_proposal::(0)?; - #[extrinsic_call] - _(RawOrigin::Root); - - Ok(()) - } + }: _(RawOrigin::Root) // Test when unlock will remove locks - #[benchmark] - fn unlock_remove(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { + unlock_remove { + let r in 0 .. (T::MaxVotes::get() - 1); + let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); // Vote and immediately unvote - for i in 0..r { + for i in 0 .. r { let ref_index = add_referendum::(i).0; Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; @@ -737,25 +631,23 @@ mod benchmarks { let caller = funded_account::("caller", 0); whitelist_account!(caller); - - #[extrinsic_call] - unlock(RawOrigin::Signed(caller), locker_lookup); - + }: unlock(RawOrigin::Signed(caller), locker_lookup) + verify { // Note that we may want to add a `get_lock` api to actually verify let voting = VotingOf::::get(&locker); assert_eq!(voting.locked_balance(), BalanceOf::::zero()); - Ok(()) } // Test when unlock will set a new value - #[benchmark] - fn unlock_set(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { + unlock_set { + let r in 0 .. (T::MaxVotes::get() - 1); + let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); - for i in 0..r { + for i in 0 .. 
r { let ref_index = add_referendum::(i).0; Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; } @@ -778,10 +670,8 @@ mod benchmarks { let caller = funded_account::("caller", 0); whitelist_account!(caller); - - #[extrinsic_call] - unlock(RawOrigin::Signed(caller), locker_lookup); - + }: unlock(RawOrigin::Signed(caller), locker_lookup) + verify { let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), @@ -791,21 +681,17 @@ mod benchmarks { let voting = VotingOf::::get(&locker); // Note that we may want to add a `get_lock` api to actually verify assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() }); - Ok(()) } - #[benchmark] - fn remove_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> { + remove_vote { + let r in 1 .. T::MaxVotes::get(); + let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); - for i in 0..r { + for i in 0 .. r { let ref_index = add_referendum::(i).0; - Democracy::::vote( - RawOrigin::Signed(caller.clone()).into(), - ref_index, - account_vote, - )?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -816,32 +702,26 @@ mod benchmarks { let ref_index = r - 1; whitelist_account!(caller); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), ref_index); - + }: _(RawOrigin::Signed(caller.clone()), ref_index) + verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); - Ok(()) } // Worst case is when target == caller and referendum is ongoing - #[benchmark] - fn remove_other_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> { + remove_other_vote { + let r in 1 .. 
T::MaxVotes::get(); + let caller = funded_account::("caller", r); let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); - for i in 0..r { + for i in 0 .. r { let ref_index = add_referendum::(i).0; - Democracy::::vote( - RawOrigin::Signed(caller.clone()).into(), - ref_index, - account_vote, - )?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -852,71 +732,68 @@ mod benchmarks { let ref_index = r - 1; whitelist_account!(caller); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index); - + }: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index) + verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); - Ok(()) } - #[benchmark] - fn set_external_metadata() -> Result<(), BenchmarkError> { + set_external_metadata { let origin = T::ExternalOrigin::try_successful_origin() .expect("ExternalOrigin has no successful origin required for the benchmark"); - assert_ok!(Democracy::::external_propose(origin.clone(), make_proposal::(0))); + assert_ok!( + Democracy::::external_propose(origin.clone(), make_proposal::(0)) + ); let owner = MetadataOwner::External; let hash = note_preimage::(); - - #[extrinsic_call] - set_metadata(origin as T::RuntimeOrigin, owner.clone(), Some(hash)); - - assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); - Ok(()) + }: set_metadata(origin, owner.clone(), Some(hash)) + verify { + assert_last_event::(crate::Event::MetadataSet { + owner, + hash, + }.into()); } - #[benchmark] - fn clear_external_metadata() -> Result<(), BenchmarkError> { + clear_external_metadata { let origin = T::ExternalOrigin::try_successful_origin() .expect("ExternalOrigin has no successful origin required for the benchmark"); - 
assert_ok!(Democracy::::external_propose(origin.clone(), make_proposal::(0))); + assert_ok!( + Democracy::::external_propose(origin.clone(), make_proposal::(0)) + ); let owner = MetadataOwner::External; - let _proposer = funded_account::("proposer", 0); + let proposer = funded_account::("proposer", 0); let hash = note_preimage::(); assert_ok!(Democracy::::set_metadata(origin.clone(), owner.clone(), Some(hash))); - - #[extrinsic_call] - set_metadata(origin as T::RuntimeOrigin, owner.clone(), None); - - assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); - Ok(()) + }: set_metadata(origin, owner.clone(), None) + verify { + assert_last_event::(crate::Event::MetadataCleared { + owner, + hash, + }.into()); } - #[benchmark] - fn set_proposal_metadata() -> Result<(), BenchmarkError> { + set_proposal_metadata { // Place our proposal at the end to make sure it's worst case. - for i in 0..T::MaxProposals::get() { + for i in 0 .. T::MaxProposals::get() { add_proposal::(i)?; } let owner = MetadataOwner::Proposal(0); let proposer = funded_account::("proposer", 0); let hash = note_preimage::(); - - #[extrinsic_call] - set_metadata(RawOrigin::Signed(proposer), owner.clone(), Some(hash)); - - assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); - Ok(()) + }: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), Some(hash)) + verify { + assert_last_event::(crate::Event::MetadataSet { + owner, + hash, + }.into()); } - #[benchmark] - fn clear_proposal_metadata() -> Result<(), BenchmarkError> { + clear_proposal_metadata { // Place our proposal at the end to make sure it's worst case. - for i in 0..T::MaxProposals::get() { + for i in 0 .. 
T::MaxProposals::get() { add_proposal::(i)?; } let proposer = funded_account::("proposer", 0); @@ -925,36 +802,33 @@ mod benchmarks { assert_ok!(Democracy::::set_metadata( RawOrigin::Signed(proposer.clone()).into(), owner.clone(), - Some(hash) - )); - - #[extrinsic_call] - set_metadata::(RawOrigin::Signed(proposer), owner.clone(), None); - - assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); - Ok(()) + Some(hash))); + }: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), None) + verify { + assert_last_event::(crate::Event::MetadataCleared { + owner, + hash, + }.into()); } - #[benchmark] - fn set_referendum_metadata() -> Result<(), BenchmarkError> { + set_referendum_metadata { // create not ongoing referendum. ReferendumInfoOf::::insert( 0, ReferendumInfo::Finished { end: BlockNumberFor::::zero(), approved: true }, ); let owner = MetadataOwner::Referendum(0); - let _caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); let hash = note_preimage::(); - - #[extrinsic_call] - set_metadata::(RawOrigin::Root, owner.clone(), Some(hash)); - - assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); - Ok(()) + }: set_metadata(RawOrigin::Root.into(), owner.clone(), Some(hash)) + verify { + assert_last_event::(crate::Event::MetadataSet { + owner, + hash, + }.into()); } - #[benchmark] - fn clear_referendum_metadata() -> Result<(), BenchmarkError> { + clear_referendum_metadata { // create not ongoing referendum. 
ReferendumInfoOf::::insert( 0, @@ -964,13 +838,17 @@ mod benchmarks { let hash = note_preimage::(); MetadataOf::::insert(owner.clone(), hash); let caller = funded_account::("caller", 0); - - #[extrinsic_call] - set_metadata::(RawOrigin::Signed(caller), owner.clone(), None); - - assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); - Ok(()) + }: set_metadata(RawOrigin::Signed(caller).into(), owner.clone(), None) + verify { + assert_last_event::(crate::Event::MetadataCleared { + owner, + hash, + }.into()); } - impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!( + Democracy, + crate::tests::new_test_ext(), + crate::tests::Test + ); } diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs index 765ee57f0eb3..6eb82c631a2a 100644 --- a/substrate/frame/democracy/src/weights.rs +++ b/substrate/frame/democracy/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_democracy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_democracy +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_democracy -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/democracy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -94,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_681_000 picoseconds. - Weight::from_parts(51_578_000, 18187) + // Minimum execution time: 42_266_000 picoseconds. + Weight::from_parts(43_382_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -105,8 +107,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 45_001_000 picoseconds. - Weight::from_parts(45_990_000, 6695) + // Minimum execution time: 37_765_000 picoseconds. + Weight::from_parts(38_679_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -122,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 65_095_000 picoseconds. - Weight::from_parts(67_484_000, 7260) + // Minimum execution time: 56_200_000 picoseconds. 
+ Weight::from_parts(57_320_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -139,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 66_877_000 picoseconds. - Weight::from_parts(68_910_000, 7260) + // Minimum execution time: 58_633_000 picoseconds. + Weight::from_parts(60_809_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -154,8 +156,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 29_312_000 picoseconds. - Weight::from_parts(30_040_000, 3666) + // Minimum execution time: 23_908_000 picoseconds. + Weight::from_parts(24_659_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -177,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 107_932_000 picoseconds. - Weight::from_parts(108_940_000, 18187) + // Minimum execution time: 100_268_000 picoseconds. + Weight::from_parts(101_309_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -190,8 +192,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 17_703_000 picoseconds. - Weight::from_parts(18_188_000, 6703) + // Minimum execution time: 12_143_000 picoseconds. + Weight::from_parts(12_843_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_672_000 picoseconds. 
- Weight::from_parts(2_814_000, 0) + // Minimum execution time: 2_792_000 picoseconds. + Weight::from_parts(2_922_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -211,8 +213,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_584_000 picoseconds. - Weight::from_parts(2_846_000, 0) + // Minimum execution time: 2_792_000 picoseconds. + Weight::from_parts(2_953_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -227,8 +229,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 24_603_000 picoseconds. - Weight::from_parts(25_407_000, 3518) + // Minimum execution time: 23_948_000 picoseconds. + Weight::from_parts(24_773_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -242,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 31_721_000 picoseconds. - Weight::from_parts(32_785_000, 6703) + // Minimum execution time: 27_233_000 picoseconds. + Weight::from_parts(28_327_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -259,8 +261,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 86_981_000 picoseconds. - Weight::from_parts(89_140_000, 18187) + // Minimum execution time: 82_141_000 picoseconds. 
+ Weight::from_parts(83_511_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -272,8 +274,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 17_465_000 picoseconds. - Weight::from_parts(18_018_000, 3518) + // Minimum execution time: 16_650_000 picoseconds. + Weight::from_parts(17_140_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -288,10 +290,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_381_932, 1489) - // Standard Error: 10_311 - .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) + // Minimum execution time: 5_308_000 picoseconds. + Weight::from_parts(6_320_667, 1489) + // Standard Error: 6_714 + .saturating_add(Weight::from_parts(3_307_440, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -314,10 +316,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 9_766_000 picoseconds. - Weight::from_parts(9_788_895, 18187) - // Standard Error: 11_913 - .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) + // Minimum execution time: 8_287_000 picoseconds. 
+ Weight::from_parts(7_834_729, 18187) + // Standard Error: 7_499 + .saturating_add(Weight::from_parts(3_333_021, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -336,10 +338,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 48_992_000 picoseconds. - Weight::from_parts(55_524_560, 19800) - // Standard Error: 11_278 - .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) + // Minimum execution time: 40_681_000 picoseconds. + Weight::from_parts(46_603_677, 19800) + // Standard Error: 7_453 + .saturating_add(Weight::from_parts(4_269_926, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -355,10 +357,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 23_828_000 picoseconds. - Weight::from_parts(23_638_577, 13530) - // Standard Error: 10_946 - .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) + // Minimum execution time: 18_176_000 picoseconds. + Weight::from_parts(19_473_041, 13530) + // Standard Error: 6_046 + .saturating_add(Weight::from_parts(4_259_914, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -371,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_759_000 picoseconds. 
- Weight::from_parts(2_850_000, 0) + // Minimum execution time: 2_828_000 picoseconds. + Weight::from_parts(2_979_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -388,10 +390,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 30_804_000 picoseconds. - Weight::from_parts(42_750_018, 7260) - // Standard Error: 3_300 - .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) + // Minimum execution time: 24_256_000 picoseconds. + Weight::from_parts(35_489_844, 7260) + // Standard Error: 2_809 + .saturating_add(Weight::from_parts(82_542, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +410,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 39_946_000 picoseconds. - Weight::from_parts(44_500_306, 7260) - // Standard Error: 1_914 - .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) + // Minimum execution time: 32_306_000 picoseconds. + Weight::from_parts(35_288_926, 7260) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(118_566, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -424,10 +426,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_677_000 picoseconds. - Weight::from_parts(25_329_290, 7260) - // Standard Error: 1_998 - .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) + // Minimum execution time: 15_269_000 picoseconds. 
+ Weight::from_parts(18_595_547, 7260) + // Standard Error: 1_952 + .saturating_add(Weight::from_parts(122_967, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -440,10 +442,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_777_000 picoseconds. - Weight::from_parts(26_635_600, 7260) - // Standard Error: 2_697 - .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) + // Minimum execution time: 15_213_000 picoseconds. + Weight::from_parts(18_870_570, 7260) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(124_205, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -457,10 +459,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_450_000, 3556) + // Minimum execution time: 17_827_000 picoseconds. + Weight::from_parts(18_255_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -472,8 +474,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_212_000 picoseconds. - Weight::from_parts(16_745_000, 3518) + // Minimum execution time: 14_205_000 picoseconds. 
+ Weight::from_parts(14_631_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -487,10 +489,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4883` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 47_225_000 picoseconds. - Weight::from_parts(47_976_000, 18187) + // Minimum execution time: 40_868_000 picoseconds. + Weight::from_parts(41_688_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -502,8 +504,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 43_140_000 picoseconds. - Weight::from_parts(43_924_000, 18187) + // Minimum execution time: 36_573_000 picoseconds. + Weight::from_parts(37_017_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -515,10 +517,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 14_614_000 picoseconds. - Weight::from_parts(15_376_000, 3556) + // Minimum execution time: 13_741_000 picoseconds. + Weight::from_parts(14_337_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -530,8 +532,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 22_588_000 picoseconds. 
- Weight::from_parts(23_267_000, 3666) + // Minimum execution time: 16_358_000 picoseconds. + Weight::from_parts(17_157_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -551,8 +553,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_681_000 picoseconds. - Weight::from_parts(51_578_000, 18187) + // Minimum execution time: 42_266_000 picoseconds. + Weight::from_parts(43_382_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -562,8 +564,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 45_001_000 picoseconds. - Weight::from_parts(45_990_000, 6695) + // Minimum execution time: 37_765_000 picoseconds. + Weight::from_parts(38_679_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -579,8 +581,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 65_095_000 picoseconds. - Weight::from_parts(67_484_000, 7260) + // Minimum execution time: 56_200_000 picoseconds. + Weight::from_parts(57_320_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -596,8 +598,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 66_877_000 picoseconds. - Weight::from_parts(68_910_000, 7260) + // Minimum execution time: 58_633_000 picoseconds. 
+ Weight::from_parts(60_809_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -611,8 +613,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 29_312_000 picoseconds. - Weight::from_parts(30_040_000, 3666) + // Minimum execution time: 23_908_000 picoseconds. + Weight::from_parts(24_659_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -634,8 +636,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 107_932_000 picoseconds. - Weight::from_parts(108_940_000, 18187) + // Minimum execution time: 100_268_000 picoseconds. + Weight::from_parts(101_309_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -647,8 +649,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 17_703_000 picoseconds. - Weight::from_parts(18_188_000, 6703) + // Minimum execution time: 12_143_000 picoseconds. + Weight::from_parts(12_843_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -658,8 +660,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_672_000 picoseconds. - Weight::from_parts(2_814_000, 0) + // Minimum execution time: 2_792_000 picoseconds. + Weight::from_parts(2_922_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -668,8 +670,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_584_000 picoseconds. 
- Weight::from_parts(2_846_000, 0) + // Minimum execution time: 2_792_000 picoseconds. + Weight::from_parts(2_953_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -684,8 +686,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 24_603_000 picoseconds. - Weight::from_parts(25_407_000, 3518) + // Minimum execution time: 23_948_000 picoseconds. + Weight::from_parts(24_773_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -699,8 +701,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 31_721_000 picoseconds. - Weight::from_parts(32_785_000, 6703) + // Minimum execution time: 27_233_000 picoseconds. + Weight::from_parts(28_327_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -716,8 +718,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 86_981_000 picoseconds. - Weight::from_parts(89_140_000, 18187) + // Minimum execution time: 82_141_000 picoseconds. + Weight::from_parts(83_511_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -729,8 +731,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 17_465_000 picoseconds. - Weight::from_parts(18_018_000, 3518) + // Minimum execution time: 16_650_000 picoseconds. 
+ Weight::from_parts(17_140_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -745,10 +747,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_381_932, 1489) - // Standard Error: 10_311 - .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) + // Minimum execution time: 5_308_000 picoseconds. + Weight::from_parts(6_320_667, 1489) + // Standard Error: 6_714 + .saturating_add(Weight::from_parts(3_307_440, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -771,10 +773,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 9_766_000 picoseconds. - Weight::from_parts(9_788_895, 18187) - // Standard Error: 11_913 - .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) + // Minimum execution time: 8_287_000 picoseconds. + Weight::from_parts(7_834_729, 18187) + // Standard Error: 7_499 + .saturating_add(Weight::from_parts(3_333_021, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -793,10 +795,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 48_992_000 picoseconds. - Weight::from_parts(55_524_560, 19800) - // Standard Error: 11_278 - .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) + // Minimum execution time: 40_681_000 picoseconds. 
+ Weight::from_parts(46_603_677, 19800) + // Standard Error: 7_453 + .saturating_add(Weight::from_parts(4_269_926, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -812,10 +814,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 23_828_000 picoseconds. - Weight::from_parts(23_638_577, 13530) - // Standard Error: 10_946 - .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) + // Minimum execution time: 18_176_000 picoseconds. + Weight::from_parts(19_473_041, 13530) + // Standard Error: 6_046 + .saturating_add(Weight::from_parts(4_259_914, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -828,8 +830,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_759_000 picoseconds. - Weight::from_parts(2_850_000, 0) + // Minimum execution time: 2_828_000 picoseconds. + Weight::from_parts(2_979_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -845,10 +847,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 30_804_000 picoseconds. - Weight::from_parts(42_750_018, 7260) - // Standard Error: 3_300 - .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) + // Minimum execution time: 24_256_000 picoseconds. 
+ Weight::from_parts(35_489_844, 7260) + // Standard Error: 2_809 + .saturating_add(Weight::from_parts(82_542, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -865,10 +867,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 39_946_000 picoseconds. - Weight::from_parts(44_500_306, 7260) - // Standard Error: 1_914 - .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) + // Minimum execution time: 32_306_000 picoseconds. + Weight::from_parts(35_288_926, 7260) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(118_566, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -881,10 +883,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_677_000 picoseconds. - Weight::from_parts(25_329_290, 7260) - // Standard Error: 1_998 - .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) + // Minimum execution time: 15_269_000 picoseconds. + Weight::from_parts(18_595_547, 7260) + // Standard Error: 1_952 + .saturating_add(Weight::from_parts(122_967, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -897,10 +899,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 21_777_000 picoseconds. - Weight::from_parts(26_635_600, 7260) - // Standard Error: 2_697 - .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) + // Minimum execution time: 15_213_000 picoseconds. 
+ Weight::from_parts(18_870_570, 7260) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(124_205, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -914,10 +916,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_450_000, 3556) + // Minimum execution time: 17_827_000 picoseconds. + Weight::from_parts(18_255_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -929,8 +931,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_212_000 picoseconds. - Weight::from_parts(16_745_000, 3518) + // Minimum execution time: 14_205_000 picoseconds. + Weight::from_parts(14_631_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -944,10 +946,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4883` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 47_225_000 picoseconds. - Weight::from_parts(47_976_000, 18187) + // Minimum execution time: 40_868_000 picoseconds. + Weight::from_parts(41_688_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -959,8 +961,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 43_140_000 picoseconds. 
- Weight::from_parts(43_924_000, 18187) + // Minimum execution time: 36_573_000 picoseconds. + Weight::from_parts(37_017_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -972,10 +974,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 14_614_000 picoseconds. - Weight::from_parts(15_376_000, 3556) + // Minimum execution time: 13_741_000 picoseconds. + Weight::from_parts(14_337_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -987,8 +989,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 22_588_000 picoseconds. - Weight::from_parts(23_267_000, 3666) + // Minimum execution time: 16_358_000 picoseconds. 
+ Weight::from_parts(17_157_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 9a4a2a839346..ff2a997fafe0 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -18,20 +18,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } scale-info = { features = [ "derive", ], workspace = true } +log = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-election-provider-support = { workspace = true } -sp-arithmetic = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } -sp-npos-elections = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } +sp-npos-elections = { workspace = true } +sp-arithmetic = { workspace = true } +frame-election-provider-support = { workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } @@ -40,14 +40,14 @@ rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } strum = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] -frame-benchmarking = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true, default-features = true } sp-npos-elections = { workspace = true } sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] 
default = ["std"] diff --git a/substrate/frame/election-provider-multi-phase/src/weights.rs b/substrate/frame/election-provider-multi-phase/src/weights.rs index 2569e46e351e..1398ed047784 100644 --- a/substrate/frame/election-provider-multi-phase/src/weights.rs +++ b/substrate/frame/election-provider-multi-phase/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_election_provider_multi_phase` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -84,10 +84,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1094` + // Measured: `1061` // Estimated: `3481` - // Minimum execution time: 27_022_000 picoseconds. - Weight::from_parts(27_654_000, 3481) + // Minimum execution time: 19_436_000 picoseconds. + Weight::from_parts(20_138_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 9_613_000 picoseconds. - Weight::from_parts(9_845_000, 1633) + // Minimum execution time: 8_356_000 picoseconds. 
+ Weight::from_parts(8_708_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 10_404_000 picoseconds. - Weight::from_parts(10_847_000, 1633) + // Minimum execution time: 9_088_000 picoseconds. + Weight::from_parts(9_382_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 26_673_000 picoseconds. - Weight::from_parts(27_349_000, 3593) + // Minimum execution time: 25_899_000 picoseconds. + Weight::from_parts(26_456_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 19_544_000 picoseconds. - Weight::from_parts(19_818_000, 3593) + // Minimum execution time: 17_671_000 picoseconds. + Weight::from_parts(18_131_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,10 +152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 485_154_000 picoseconds. - Weight::from_parts(498_991_000, 0) - // Standard Error: 3_249 - .saturating_add(Weight::from_parts(337_425, 0).saturating_mul(v.into())) + // Minimum execution time: 251_900_000 picoseconds. 
+ Weight::from_parts(257_174_000, 0) + // Standard Error: 1_606 + .saturating_add(Weight::from_parts(250_961, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) @@ -182,10 +182,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 352_979_000 picoseconds. - Weight::from_parts(383_783_000, 3923) - // Standard Error: 6_259 - .saturating_add(Weight::from_parts(426_032, 0).saturating_mul(a.into())) + // Minimum execution time: 331_717_000 picoseconds. + Weight::from_parts(29_922_189, 3923) + // Standard Error: 9_972 + .saturating_add(Weight::from_parts(570_967, 0).saturating_mul(a.into())) + // Standard Error: 14_948 + .saturating_add(Weight::from_parts(159_043, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -201,15 +203,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `860` - // Estimated: 
`2345` - // Minimum execution time: 50_191_000 picoseconds. - Weight::from_parts(51_531_000, 2345) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `927` + // Estimated: `2412` + // Minimum execution time: 44_129_000 picoseconds. + Weight::from_parts(46_420_000, 2412) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) @@ -234,12 +238,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_946_406_000 picoseconds. - Weight::from_parts(6_087_882_000, 1738) - // Standard Error: 20_145 - .saturating_add(Weight::from_parts(348_338, 0).saturating_mul(v.into())) - // Standard Error: 59_699 - .saturating_add(Weight::from_parts(4_596_494, 0).saturating_mul(a.into())) + // Minimum execution time: 5_585_830_000 picoseconds. + Weight::from_parts(5_662_741_000, 1738) + // Standard Error: 17_454 + .saturating_add(Weight::from_parts(352_514, 0).saturating_mul(v.into())) + // Standard Error: 51_723 + .saturating_add(Weight::from_parts(4_182_087, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -261,12 +265,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_004_146_000 picoseconds. - Weight::from_parts(5_166_030_000, 1713) + // Minimum execution time: 4_902_422_000 picoseconds. 
+ Weight::from_parts(5_001_852_000, 1713) // Standard Error: 15_536 - .saturating_add(Weight::from_parts(306_715, 0).saturating_mul(v.into())) - // Standard Error: 46_039 - .saturating_add(Weight::from_parts(3_418_885, 0).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(354_309, 0).saturating_mul(v.into())) + // Standard Error: 46_041 + .saturating_add(Weight::from_parts(3_090_094, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) @@ -293,10 +297,10 @@ impl WeightInfo for () { /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1094` + // Measured: `1061` // Estimated: `3481` - // Minimum execution time: 27_022_000 picoseconds. - Weight::from_parts(27_654_000, 3481) + // Minimum execution time: 19_436_000 picoseconds. + Weight::from_parts(20_138_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) @@ -307,8 +311,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 9_613_000 picoseconds. - Weight::from_parts(9_845_000, 1633) + // Minimum execution time: 8_356_000 picoseconds. + Weight::from_parts(8_708_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -320,8 +324,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 10_404_000 picoseconds. - Weight::from_parts(10_847_000, 1633) + // Minimum execution time: 9_088_000 picoseconds. 
+ Weight::from_parts(9_382_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -333,8 +337,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 26_673_000 picoseconds. - Weight::from_parts(27_349_000, 3593) + // Minimum execution time: 25_899_000 picoseconds. + Weight::from_parts(26_456_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -344,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 19_544_000 picoseconds. - Weight::from_parts(19_818_000, 3593) + // Minimum execution time: 17_671_000 picoseconds. + Weight::from_parts(18_131_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -361,10 +365,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 485_154_000 picoseconds. - Weight::from_parts(498_991_000, 0) - // Standard Error: 3_249 - .saturating_add(Weight::from_parts(337_425, 0).saturating_mul(v.into())) + // Minimum execution time: 251_900_000 picoseconds. + Weight::from_parts(257_174_000, 0) + // Standard Error: 1_606 + .saturating_add(Weight::from_parts(250_961, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) @@ -391,10 +395,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 352_979_000 picoseconds. 
- Weight::from_parts(383_783_000, 3923) - // Standard Error: 6_259 - .saturating_add(Weight::from_parts(426_032, 0).saturating_mul(a.into())) + // Minimum execution time: 331_717_000 picoseconds. + Weight::from_parts(29_922_189, 3923) + // Standard Error: 9_972 + .saturating_add(Weight::from_parts(570_967, 0).saturating_mul(a.into())) + // Standard Error: 14_948 + .saturating_add(Weight::from_parts(159_043, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -410,15 +416,17 @@ impl WeightInfo for () { /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `860` - // Estimated: `2345` - // Minimum execution time: 50_191_000 picoseconds. - Weight::from_parts(51_531_000, 2345) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `927` + // Estimated: `2412` + // Minimum execution time: 44_129_000 picoseconds. 
+ Weight::from_parts(46_420_000, 2412) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) @@ -443,12 +451,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_946_406_000 picoseconds. - Weight::from_parts(6_087_882_000, 1738) - // Standard Error: 20_145 - .saturating_add(Weight::from_parts(348_338, 0).saturating_mul(v.into())) - // Standard Error: 59_699 - .saturating_add(Weight::from_parts(4_596_494, 0).saturating_mul(a.into())) + // Minimum execution time: 5_585_830_000 picoseconds. + Weight::from_parts(5_662_741_000, 1738) + // Standard Error: 17_454 + .saturating_add(Weight::from_parts(352_514, 0).saturating_mul(v.into())) + // Standard Error: 51_723 + .saturating_add(Weight::from_parts(4_182_087, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -470,12 +478,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_004_146_000 picoseconds. - Weight::from_parts(5_166_030_000, 1713) + // Minimum execution time: 4_902_422_000 picoseconds. 
+ Weight::from_parts(5_001_852_000, 1713) // Standard Error: 15_536 - .saturating_add(Weight::from_parts(306_715, 0).saturating_mul(v.into())) - // Standard Error: 46_039 - .saturating_add(Weight::from_parts(3_418_885, 0).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(354_309, 0).saturating_mul(v.into())) + // Standard Error: 46_041 + .saturating_add(Weight::from_parts(3_090_094, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index 5009d3d54d56..771376e06656 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -16,30 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { features = ["derive"], workspace = true, default-features = true } -log = { workspace = true } parking_lot = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true, default-features = true } +log = { workspace = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-npos-elections = { workspace = true } sp-runtime = { workspace = true, default-features = true } -sp-staking = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true } sp-tracing = { workspace = 
true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } pallet-election-provider-multi-phase = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } -pallet-session = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-session = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 26a6345e145f..135149694387 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -47,7 +47,7 @@ fn log_current_time() { "block: {:?}, session: {:?}, era: {:?}, EPM phase: {:?} ts: {:?}", System::block_number(), Session::current_index(), - pallet_staking::CurrentEra::::get(), + Staking::current_era(), CurrentPhase::::get(), Now::::get() ); @@ -147,35 +147,30 @@ fn mass_slash_doesnt_enter_emergency_phase() { let active_set_size_before_slash = Session::validators().len(); - // assuming half is above the disabling limit (default 1/3), otherwise test will 
break - let slashed = slash_half_the_active_set(); + // Slash more than 1/3 of the active validators + let mut slashed = slash_half_the_active_set(); let active_set_size_after_slash = Session::validators().len(); // active set should stay the same before and after the slash assert_eq!(active_set_size_before_slash, active_set_size_after_slash); + // Slashed validators are disabled up to a limit + slashed.truncate( + pallet_staking::UpToLimitDisablingStrategy::::disable_limit( + active_set_size_after_slash, + ), + ); + // Find the indices of the disabled validators let active_set = Session::validators(); - let potentially_disabled = slashed + let expected_disabled = slashed .into_iter() .map(|d| active_set.iter().position(|a| *a == d).unwrap() as u32) .collect::>(); - // Ensure that every actually disabled validator is also in the potentially disabled set - // (not necessarily the other way around) - let disabled = Session::disabled_validators(); - for d in disabled.iter() { - assert!(potentially_disabled.contains(d)); - } - - // Ensure no more than disabling limit of validators (default 1/3) is disabled - let disabling_limit = pallet_staking::UpToLimitWithReEnablingDisablingStrategy::< - SLASHING_DISABLING_FACTOR, - >::disable_limit(active_set_size_before_slash); - assert!(disabled.len() == disabling_limit); - assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); + assert_eq!(Session::disabled_validators(), expected_disabled); }); } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index eaab848c1694..360f14913fcc 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -49,7 +49,7 @@ use pallet_election_provider_multi_phase::{ unsigned::MinerConfig, Call, CurrentPhase, ElectionCompute, GeometricDepositBase, 
QueuedSolution, SolutionAccuracyOf, }; -use pallet_staking::{ActiveEra, CurrentEra, ErasStartSessionIndex, StakerStatus}; +use pallet_staking::StakerStatus; use parking_lot::RwLock; use std::sync::Arc; @@ -304,8 +304,7 @@ impl pallet_staking::Config for Runtime { type MaxUnlockingChunks = MaxUnlockingChunks; type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; - type DisablingStrategy = - pallet_staking::UpToLimitWithReEnablingDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } @@ -807,11 +806,11 @@ pub(crate) fn start_active_era( } pub(crate) fn active_era() -> EraIndex { - ActiveEra::::get().unwrap().index + Staking::active_era().unwrap().index } pub(crate) fn current_era() -> EraIndex { - CurrentEra::::get().unwrap() + Staking::current_era().unwrap() } // Fast forward until EPM signed phase. @@ -863,11 +862,11 @@ pub(crate) fn on_offence_now( >], slash_fraction: &[Perbill], ) { - let now = ActiveEra::::get().unwrap().index; + let now = Staking::active_era().unwrap().index; let _ = Staking::on_offence( offenders, slash_fraction, - ErasStartSessionIndex::::get(now).unwrap(), + Staking::eras_start_session_index(now).unwrap(), ); } diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 32fa381e1d27..cae20d1b46a4 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -16,14 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-election-provider-solution-type = { workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace 
= true } -sp-core = { workspace = true } sp-npos-elections = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } [dev-dependencies] rand = { features = ["small_rng"], workspace = true, default-features = true } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index c2f307016f6b..e24ed7f079fe 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = { workspace = true } -proc-macro2 = { workspace = true } -quote = { workspace = true } syn = { features = ["full", "visit"], workspace = true } +quote = { workspace = true } +proc-macro2 = { workspace = true } +proc-macro-crate = { workspace = true } [dev-dependencies] codec = { workspace = true, default-features = true } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index d82a8acb2f84..86abbf9677e0 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -21,14 +21,14 @@ honggfuzz = { workspace = true } rand = { features = ["small_rng", "std"], workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-election-provider-solution-type = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } # used by 
generate_solution_type: -frame-support = { workspace = true, default-features = true } sp-npos-elections = { workspace = true } +frame-support = { workspace = true, default-features = true } [[bin]] name = "compact" diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index b24ec7bd637e..c1b12b3da4d8 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-npos-elections = { workspace = true } diff --git a/substrate/frame/elections-phragmen/src/weights.rs b/substrate/frame/elections-phragmen/src/weights.rs index f71106a47978..fb2e10f9f066 100644 --- a/substrate/frame/elections-phragmen/src/weights.rs +++ b/substrate/frame/elections-phragmen/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_elections_phragmen` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -83,12 +83,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `436 + v * (80 ±0)` + // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 39_685_000 picoseconds. - Weight::from_parts(40_878_043, 4764) - // Standard Error: 3_272 - .saturating_add(Weight::from_parts(168_519, 0).saturating_mul(v.into())) + // Minimum execution time: 30_160_000 picoseconds. + Weight::from_parts(31_473_640, 4764) + // Standard Error: 3_581 + .saturating_add(Weight::from_parts(135_663, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -108,12 +108,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `404 + v * (80 ±0)` + // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 51_703_000 picoseconds. - Weight::from_parts(53_305_901, 4764) - // Standard Error: 5_269 - .saturating_add(Weight::from_parts(167_784, 0).saturating_mul(v.into())) + // Minimum execution time: 41_429_000 picoseconds. + Weight::from_parts(42_684_714, 4764) + // Standard Error: 4_828 + .saturating_add(Weight::from_parts(173_254, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -133,12 +133,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[2, 16]`. 
fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `436 + v * (80 ±0)` + // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 51_554_000 picoseconds. - Weight::from_parts(53_523_254, 4764) - // Standard Error: 5_642 - .saturating_add(Weight::from_parts(156_053, 0).saturating_mul(v.into())) + // Minimum execution time: 41_013_000 picoseconds. + Weight::from_parts(42_555_632, 4764) + // Standard Error: 4_627 + .saturating_add(Weight::from_parts(162_225, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -151,10 +151,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `958` + // Measured: `925` // Estimated: `4764` - // Minimum execution time: 51_835_000 picoseconds. - Weight::from_parts(56_349_000, 4764) + // Minimum execution time: 43_431_000 picoseconds. + Weight::from_parts(44_500_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,12 +167,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1603 + c * (48 ±0)` - // Estimated: `3088 + c * (48 ±0)` - // Minimum execution time: 40_974_000 picoseconds. - Weight::from_parts(42_358_018, 3088) - // Standard Error: 1_472 - .saturating_add(Weight::from_parts(85_881, 0).saturating_mul(c.into())) + // Measured: `1570 + c * (48 ±0)` + // Estimated: `3055 + c * (48 ±0)` + // Minimum execution time: 34_520_000 picoseconds. 
+ Weight::from_parts(35_911_881, 3055) + // Standard Error: 1_885 + .saturating_add(Weight::from_parts(123_837, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -182,12 +182,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 64]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318 + c * (48 ±0)` - // Estimated: `1803 + c * (48 ±0)` - // Minimum execution time: 33_286_000 picoseconds. - Weight::from_parts(34_809_065, 1803) - // Standard Error: 1_507 - .saturating_add(Weight::from_parts(67_115, 0).saturating_mul(c.into())) + // Measured: `285 + c * (48 ±0)` + // Estimated: `1770 + c * (48 ±0)` + // Minimum execution time: 28_020_000 picoseconds. + Weight::from_parts(29_227_248, 1770) + // Standard Error: 1_202 + .saturating_add(Weight::from_parts(83_328, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -204,10 +204,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1999` - // Estimated: `3484` - // Minimum execution time: 49_223_000 picoseconds. - Weight::from_parts(50_790_000, 3484) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 42_489_000 picoseconds. 
+ Weight::from_parts(43_710_000, 3418) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -215,10 +215,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `913` - // Estimated: `2398` - // Minimum execution time: 36_995_000 picoseconds. - Weight::from_parts(37_552_000, 2398) + // Measured: `880` + // Estimated: `2365` + // Minimum execution time: 29_228_000 picoseconds. + Weight::from_parts(30_343_000, 2365) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,10 +245,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1999` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 54_506_000 picoseconds. - Weight::from_parts(55_765_000, 3593) + // Minimum execution time: 46_909_000 picoseconds. + Weight::from_parts(47_907_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -271,13 +271,13 @@ impl WeightInfo for SubstrateWeight { fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (818 ±0) + v * (57 ±0)` - // Estimated: `24939 + d * (3774 ±1) + v * (24 ±0)` - // Minimum execution time: 7_043_000 picoseconds. - Weight::from_parts(7_628_000, 24939) - // Standard Error: 17_891 - .saturating_add(Weight::from_parts(357_049, 0).saturating_mul(v.into())) - // Standard Error: 38_964 - .saturating_add(Weight::from_parts(61_698_254, 0).saturating_mul(d.into())) + // Estimated: `24906 + d * (3774 ±0) + v * (24 ±0)` + // Minimum execution time: 5_175_000 picoseconds. 
+ Weight::from_parts(5_797_000, 24906) + // Standard Error: 10_951 + .saturating_add(Weight::from_parts(39_675, 0).saturating_mul(v.into())) + // Standard Error: 23_850 + .saturating_add(Weight::from_parts(53_959_224, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(d.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) @@ -308,13 +308,13 @@ impl WeightInfo for SubstrateWeight { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `179052 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_343_974_000 picoseconds. - Weight::from_parts(1_352_233_000, 179052) - // Standard Error: 597_762 - .saturating_add(Weight::from_parts(20_404_086, 0).saturating_mul(v.into())) - // Standard Error: 38_353 - .saturating_add(Weight::from_parts(793_851, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_136_994_000 picoseconds. + Weight::from_parts(1_142_143_000, 178920) + // Standard Error: 595_387 + .saturating_add(Weight::from_parts(19_373_386, 0).saturating_mul(v.into())) + // Standard Error: 38_201 + .saturating_add(Weight::from_parts(797_696, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) @@ -343,12 +343,12 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `436 + v * (80 ±0)` + // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 39_685_000 picoseconds. 
- Weight::from_parts(40_878_043, 4764) - // Standard Error: 3_272 - .saturating_add(Weight::from_parts(168_519, 0).saturating_mul(v.into())) + // Minimum execution time: 30_160_000 picoseconds. + Weight::from_parts(31_473_640, 4764) + // Standard Error: 3_581 + .saturating_add(Weight::from_parts(135_663, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -368,12 +368,12 @@ impl WeightInfo for () { /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `404 + v * (80 ±0)` + // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 51_703_000 picoseconds. - Weight::from_parts(53_305_901, 4764) - // Standard Error: 5_269 - .saturating_add(Weight::from_parts(167_784, 0).saturating_mul(v.into())) + // Minimum execution time: 41_429_000 picoseconds. + Weight::from_parts(42_684_714, 4764) + // Standard Error: 4_828 + .saturating_add(Weight::from_parts(173_254, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -393,12 +393,12 @@ impl WeightInfo for () { /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `436 + v * (80 ±0)` + // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 51_554_000 picoseconds. - Weight::from_parts(53_523_254, 4764) - // Standard Error: 5_642 - .saturating_add(Weight::from_parts(156_053, 0).saturating_mul(v.into())) + // Minimum execution time: 41_013_000 picoseconds. 
+ Weight::from_parts(42_555_632, 4764) + // Standard Error: 4_627 + .saturating_add(Weight::from_parts(162_225, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -411,10 +411,10 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `958` + // Measured: `925` // Estimated: `4764` - // Minimum execution time: 51_835_000 picoseconds. - Weight::from_parts(56_349_000, 4764) + // Minimum execution time: 43_431_000 picoseconds. + Weight::from_parts(44_500_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -427,12 +427,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1603 + c * (48 ±0)` - // Estimated: `3088 + c * (48 ±0)` - // Minimum execution time: 40_974_000 picoseconds. - Weight::from_parts(42_358_018, 3088) - // Standard Error: 1_472 - .saturating_add(Weight::from_parts(85_881, 0).saturating_mul(c.into())) + // Measured: `1570 + c * (48 ±0)` + // Estimated: `3055 + c * (48 ±0)` + // Minimum execution time: 34_520_000 picoseconds. + Weight::from_parts(35_911_881, 3055) + // Standard Error: 1_885 + .saturating_add(Weight::from_parts(123_837, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -442,12 +442,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 64]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `318 + c * (48 ±0)` - // Estimated: `1803 + c * (48 ±0)` - // Minimum execution time: 33_286_000 picoseconds. - Weight::from_parts(34_809_065, 1803) - // Standard Error: 1_507 - .saturating_add(Weight::from_parts(67_115, 0).saturating_mul(c.into())) + // Measured: `285 + c * (48 ±0)` + // Estimated: `1770 + c * (48 ±0)` + // Minimum execution time: 28_020_000 picoseconds. + Weight::from_parts(29_227_248, 1770) + // Standard Error: 1_202 + .saturating_add(Weight::from_parts(83_328, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -464,10 +464,10 @@ impl WeightInfo for () { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1999` - // Estimated: `3484` - // Minimum execution time: 49_223_000 picoseconds. - Weight::from_parts(50_790_000, 3484) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 42_489_000 picoseconds. + Weight::from_parts(43_710_000, 3418) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -475,10 +475,10 @@ impl WeightInfo for () { /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `913` - // Estimated: `2398` - // Minimum execution time: 36_995_000 picoseconds. - Weight::from_parts(37_552_000, 2398) + // Measured: `880` + // Estimated: `2365` + // Minimum execution time: 29_228_000 picoseconds. 
+ Weight::from_parts(30_343_000, 2365) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -505,10 +505,10 @@ impl WeightInfo for () { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1999` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 54_506_000 picoseconds. - Weight::from_parts(55_765_000, 3593) + // Minimum execution time: 46_909_000 picoseconds. + Weight::from_parts(47_907_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -531,13 +531,13 @@ impl WeightInfo for () { fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (818 ±0) + v * (57 ±0)` - // Estimated: `24939 + d * (3774 ±1) + v * (24 ±0)` - // Minimum execution time: 7_043_000 picoseconds. - Weight::from_parts(7_628_000, 24939) - // Standard Error: 17_891 - .saturating_add(Weight::from_parts(357_049, 0).saturating_mul(v.into())) - // Standard Error: 38_964 - .saturating_add(Weight::from_parts(61_698_254, 0).saturating_mul(d.into())) + // Estimated: `24906 + d * (3774 ±0) + v * (24 ±0)` + // Minimum execution time: 5_175_000 picoseconds. 
+ Weight::from_parts(5_797_000, 24906) + // Standard Error: 10_951 + .saturating_add(Weight::from_parts(39_675, 0).saturating_mul(v.into())) + // Standard Error: 23_850 + .saturating_add(Weight::from_parts(53_959_224, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(d.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) @@ -568,13 +568,13 @@ impl WeightInfo for () { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `179052 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_343_974_000 picoseconds. - Weight::from_parts(1_352_233_000, 179052) - // Standard Error: 597_762 - .saturating_add(Weight::from_parts(20_404_086, 0).saturating_mul(v.into())) - // Standard Error: 38_353 - .saturating_add(Weight::from_parts(793_851, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_136_994_000 picoseconds. 
+ Weight::from_parts(1_142_143_000, 178920) + // Standard Error: 595_387 + .saturating_add(Weight::from_parts(19_373_386, 0).saturating_mul(v.into())) + // Standard Error: 38_201 + .saturating_add(Weight::from_parts(797_696, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 9eac53f0d98b..0c6ad5ef0978 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -18,14 +18,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-default-config-example = { workspace = true } pallet-dev-mode = { workspace = true } -pallet-example-authorization-tx-extension = { workspace = true } pallet-example-basic = { workspace = true } pallet-example-frame-crate = { workspace = true } pallet-example-kitchensink = { workspace = true } pallet-example-offchain-worker = { workspace = true } -pallet-example-single-block-migrations = { workspace = true } pallet-example-split = { workspace = true } +pallet-example-single-block-migrations = { workspace = true } pallet-example-tasks = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/authorization-tx-extension/src/extensions.rs b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs index dcbe171c183a..d1e56916d3a2 100644 --- a/substrate/frame/examples/authorization-tx-extension/src/extensions.rs +++ b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs @@ -18,7 +18,7 @@ use core::{fmt, marker::PhantomData}; use codec::{Decode, Encode}; -use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait, Parameter}; +use frame_support::{traits::OriginTrait, Parameter}; use 
scale_info::TypeInfo; use sp_runtime::{ impl_tx_ext_default, @@ -94,7 +94,6 @@ where _len: usize, _self_implicit: Self::Implicit, inherited_implication: &impl codec::Encode, - _source: TransactionSource, ) -> ValidateResult { // If the extension is inactive, just move on in the pipeline. let Some(auth) = &self.inner else { diff --git a/substrate/frame/examples/authorization-tx-extension/src/tests.rs b/substrate/frame/examples/authorization-tx-extension/src/tests.rs index 5579e7a98416..7ede549a2f13 100644 --- a/substrate/frame/examples/authorization-tx-extension/src/tests.rs +++ b/substrate/frame/examples/authorization-tx-extension/src/tests.rs @@ -24,9 +24,8 @@ use frame_support::{ pallet_prelude::{InvalidTransaction, TransactionValidityError}, }; use pallet_verify_signature::VerifySignature; -use sp_keyring::Sr25519Keyring; +use sp_keyring::AccountKeyring; use sp_runtime::{ - generic::ExtensionVersion, traits::{Applyable, Checkable, IdentityLookup, TransactionExtension}, MultiSignature, MultiSigner, }; @@ -36,12 +35,11 @@ use crate::{extensions::AuthorizeCoownership, mock::*, pallet_assets}; #[test] fn create_asset_works() { new_test_ext().execute_with(|| { - let alice_keyring = Sr25519Keyring::Alice; + let alice_keyring = AccountKeyring::Alice; let alice_account = AccountId::from(alice_keyring.public()); // Simple call to create asset with Id `42`. let create_asset_call = RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); - let ext_version: ExtensionVersion = 0; // Create extension that will be used for dispatch. let initial_nonce = 23; let tx_ext = ( @@ -54,7 +52,7 @@ fn create_asset_works() { // Create the transaction signature, to be used in the top level `VerifyMultiSignature` // extension. 
let tx_sign = MultiSignature::Sr25519( - (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) .using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), ); // Add the signature to the extension. @@ -99,16 +97,15 @@ fn create_asset_works() { #[test] fn create_coowned_asset_works() { new_test_ext().execute_with(|| { - let alice_keyring = Sr25519Keyring::Alice; - let bob_keyring = Sr25519Keyring::Bob; - let charlie_keyring = Sr25519Keyring::Charlie; + let alice_keyring = AccountKeyring::Alice; + let bob_keyring = AccountKeyring::Bob; + let charlie_keyring = AccountKeyring::Charlie; let alice_account = AccountId::from(alice_keyring.public()); let bob_account = AccountId::from(bob_keyring.public()); let charlie_account = AccountId::from(charlie_keyring.public()); // Simple call to create asset with Id `42`. let create_asset_call = RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); - let ext_version: ExtensionVersion = 0; // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. let inner_ext: InnerTxExtension = ( frame_system::CheckGenesis::::new(), @@ -116,8 +113,7 @@ fn create_coowned_asset_works() { frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), ); // Create the payload Alice and Bob need to sign. - let inner_payload = - (&(ext_version, &create_asset_call), &inner_ext, inner_ext.implicit().unwrap()); + let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap()); // Create Alice's signature. let alice_inner_sig = MultiSignature::Sr25519( inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), @@ -142,7 +138,7 @@ fn create_coowned_asset_works() { // Create Charlie's transaction signature, to be used in the top level // `VerifyMultiSignature` extension. 
let tx_sign = MultiSignature::Sr25519( - (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), ); // Add the signature to the extension. @@ -189,14 +185,13 @@ fn create_coowned_asset_works() { #[test] fn inner_authorization_works() { new_test_ext().execute_with(|| { - let alice_keyring = Sr25519Keyring::Alice; - let bob_keyring = Sr25519Keyring::Bob; - let charlie_keyring = Sr25519Keyring::Charlie; + let alice_keyring = AccountKeyring::Alice; + let bob_keyring = AccountKeyring::Bob; + let charlie_keyring = AccountKeyring::Charlie; let charlie_account = AccountId::from(charlie_keyring.public()); // Simple call to create asset with Id `42`. let create_asset_call = RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); - let ext_version: ExtensionVersion = 0; // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. They // are going to sign this transaction as a mortal one. let inner_ext: InnerTxExtension = ( @@ -232,7 +227,7 @@ fn inner_authorization_works() { // Create Charlie's transaction signature, to be used in the top level // `VerifyMultiSignature` extension. let tx_sign = MultiSignature::Sr25519( - (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), ); // Add the signature to the extension that Charlie signed. 
diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index 1deb82cc6ea5..f7e2b653c2d1 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-balances = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index efdf4332e329..2f1b32d964e4 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -61,7 +61,6 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ dispatch::{ClassifyDispatch, DispatchClass, DispatchResult, Pays, PaysFee, WeighData}, - pallet_prelude::TransactionSource, traits::IsSubType, weights::Weight, }; @@ -509,7 +508,6 @@ where len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult::RuntimeCall> { // if the transaction is too big, just drop it. if len > 200 { diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index 5ec253ebecf4..8e33d3d0a348 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -28,7 +28,6 @@ use sp_core::H256; // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
use sp_runtime::{ traits::{BlakeTwo256, DispatchTransaction, IdentityLookup}, - transaction_validity::TransactionSource::External, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. @@ -147,7 +146,7 @@ fn signed_ext_watch_dummy_works() { assert_eq!( WatchDummy::(PhantomData) - .validate_only(Some(1).into(), &call, &info, 150, External, 0) + .validate_only(Some(1).into(), &call, &info, 150) .unwrap() .0 .priority, @@ -155,7 +154,7 @@ ); assert_eq!( WatchDummy::(PhantomData) - .validate_only(Some(1).into(), &call, &info, 250, External, 0) + .validate_only(Some(1).into(), &call, &info, 250) .unwrap_err(), InvalidTransaction::ExhaustsResources.into(), ); diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 87485aa08ef0..fa376b4f9136 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index f690bffe0998..ccdcd4968598 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -62,10 +62,10 @@ pub mod pallet { type OverwrittenDefaultValue: Get; /// An input parameter that relies on `::AccountId`. This can - /// too have a default, as long as it is present in `frame_system::DefaultConfig`. + /// too have a default, as long as it is present in `frame_system::DefaultConfig`.
type CanDeriveDefaultFromSystem: Get; - /// We might choose to declare as one that doesn't have a default, for whatever semantical + /// We might choose to declare as one that doesn't have a default, for whatever semantical /// reason. #[pallet::no_default] type HasNoDefault: Get; diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 7589abb929d5..6625fb3a5851 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-balances = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml index 6e8e89784266..98569964a9c9 100644 --- a/substrate/frame/examples/multi-block-migrations/Cargo.toml +++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +pallet-migrations = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } log = { workspace = true } -pallet-migrations = { workspace = true } scale-info = { workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index fabdfb0f9e0c..a5664dd912d4 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ 
b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } lite-json = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keystore = { optional = true, workspace = true } diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index df5cf02594f6..755beb8b82ec 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -240,7 +240,7 @@ fn should_submit_signed_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), (),))); + assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), 0, (),))); assert_eq!(tx.function, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 4df8693e0f37..26a3a9fff753 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,18 +13,18 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive"], workspace = true } docify = { workspace = true } -frame-executive = { workspace = true } +log = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { 
workspace = true } +frame-executive = { workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -log = { workspace = true } pallet-balances = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } -sp-runtime = { workspace = true } sp-version = { workspace = true } [features] diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 48f4d9e66e9c..00695ceddf19 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -22,9 +22,9 @@ scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-core = { workspace = true } frame-benchmarking = { optional = true, workspace = true } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index ee24a9fef13d..76d084f49d9f 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -20,11 +20,11 @@ aquamarine = { workspace = true } codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 98a9655074e7..c1d0e80551c2 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -22,23 +22,23 @@ 
scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-election-provider-support = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } +frame-election-provider-support = { workspace = true } frame-benchmarking = { optional = true, workspace = true } docify = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true } -sp-tracing = { workspace = true, default-features = true } substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index 750f348c4596..d01ff715ca4f 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -19,9 +19,9 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{types::*, *}; +use crate::{types::*, Pallet as FastUnstake, *}; use alloc::{vec, vec::Vec}; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{benchmarks, whitelist_account, BenchmarkError}; use frame_support::{ assert_ok, traits::{Currency, EnsureOrigin, Get, Hooks}, @@ -89,21 +89,22 @@ fn setup_staking(v: u32, until: EraIndex) { fn on_idle_full_block() { let remaining_weight = ::BlockWeights::get().max_block; - Pallet::::on_idle(Zero::zero(), remaining_weight); + FastUnstake::::on_idle(Zero::zero(), 
remaining_weight); } -#[benchmarks] -mod benchmarks { - use super::*; +benchmarks! { // on_idle, we don't check anyone, but fully unbond them. - #[benchmark] - fn on_idle_unstake(b: Linear<1, { T::BatchSize::get() }>) { + on_idle_unstake { + let b in 1 .. T::BatchSize::get(); + ErasToCheckPerBlock::::put(1); for who in create_unexposed_batch::(b).into_iter() { - assert_ok!(Pallet::::register_fast_unstake(RawOrigin::Signed(who.clone()).into(),)); + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + )); } - // Run on_idle once. This will check era 0. + // run on_idle once. This will check era 0. assert_eq!(Head::::get(), None); on_idle_full_block::(); @@ -115,19 +116,21 @@ mod benchmarks { .. }) if checked.len() == 1 && stashes.len() as u32 == b )); - - #[block] - { - on_idle_full_block::(); - } - - assert_eq!(fast_unstake_events::().last(), Some(&Event::BatchFinished { size: b })); + } + : { + on_idle_full_block::(); + } + verify { + assert!(matches!( + fast_unstake_events::().last(), + Some(Event::BatchFinished { size: b }) + )); } - #[benchmark] - fn on_idle_check(v: Linear<1, 256>, b: Linear<1, { T::BatchSize::get() }>) { - // on_idle: When we check some number of eras and the queue is already set. - + // on_idle, when we check some number of eras and the queue is already set. + on_idle_check { + let v in 1 .. 256; + let b in 1 .. 
T::BatchSize::get(); let u = T::MaxErasToCheckPerBlock::get().min(T::Staking::bonding_duration()); ErasToCheckPerBlock::::put(u); @@ -136,73 +139,64 @@ mod benchmarks { // setup staking with v validators and u eras of data (0..=u+1) setup_staking::(v, u); - let stashes = create_unexposed_batch::(b) - .into_iter() - .map(|s| { - assert_ok!( - Pallet::::register_fast_unstake(RawOrigin::Signed(s.clone()).into(),) - ); - (s, T::Deposit::get()) - }) - .collect::>(); + let stashes = create_unexposed_batch::(b).into_iter().map(|s| { + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(s.clone()).into(), + )); + (s, T::Deposit::get()) + }).collect::>(); // no one is queued thus far. assert_eq!(Head::::get(), None); - Head::::put(UnstakeRequest { - stashes: stashes.clone().try_into().unwrap(), - checked: Default::default(), - }); - - #[block] - { - on_idle_full_block::(); - } - + Head::::put(UnstakeRequest { stashes: stashes.clone().try_into().unwrap(), checked: Default::default() }); + } + : { + on_idle_full_block::(); + } + verify { let checked = (1..=u).rev().collect::>(); let request = Head::::get().unwrap(); assert_eq!(checked, request.checked.into_inner()); - assert!(matches!(fast_unstake_events::().last(), Some(Event::BatchChecked { .. }))); + assert!(matches!( + fast_unstake_events::().last(), + Some(Event::BatchChecked { .. 
}) + )); assert!(stashes.iter().all(|(s, _)| request.stashes.iter().any(|(ss, _)| ss == s))); } - #[benchmark] - fn register_fast_unstake() { + register_fast_unstake { ErasToCheckPerBlock::::put(1); let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); - #[extrinsic_call] - _(RawOrigin::Signed(who.clone())); - + } + :_(RawOrigin::Signed(who.clone())) + verify { assert_eq!(Queue::::count(), 1); } - #[benchmark] - fn deregister() { + deregister { ErasToCheckPerBlock::::put(1); let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); - assert_ok!(Pallet::::register_fast_unstake(RawOrigin::Signed(who.clone()).into(),)); + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + )); assert_eq!(Queue::::count(), 1); whitelist_account!(who); - - #[extrinsic_call] - _(RawOrigin::Signed(who.clone())); - + } + :_(RawOrigin::Signed(who.clone())) + verify { assert_eq!(Queue::::count(), 0); } - #[benchmark] - fn control() -> Result<(), BenchmarkError> { + control { let origin = ::ControlOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, T::MaxErasToCheckPerBlock::get()); - - Ok(()) } + : _(origin, T::MaxErasToCheckPerBlock::get()) + verify {} - impl_benchmark_test_suite!(Pallet, mock::ExtBuilder::default().build(), mock::Runtime); + impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Runtime) } diff --git a/substrate/frame/fast-unstake/src/weights.rs b/substrate/frame/fast-unstake/src/weights.rs index efa2a67ae35d..dc875e93229e 100644 --- a/substrate/frame/fast-unstake/src/weights.rs +++ b/substrate/frame/fast-unstake/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_fast_unstake` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -79,8 +79,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:64 w:64) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:64 w:64) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:64 w:64) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:64 w:0) @@ -96,16 +94,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1575 + b * (452 ±0)` + // Measured: `1475 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 99_430_000 picoseconds. - Weight::from_parts(47_845_798, 7253) - // Standard Error: 35_454 - .saturating_add(Weight::from_parts(61_016_013, 0).saturating_mul(b.into())) + // Minimum execution time: 84_536_000 picoseconds. 
+ Weight::from_parts(41_949_894, 7253) + // Standard Error: 28_494 + .saturating_add(Weight::from_parts(52_945_820, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((9_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -128,14 +126,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1912 + b * (55 ±0) + v * (10055 ±0)` + // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` - // Minimum execution time: 1_839_591_000 picoseconds. - Weight::from_parts(1_849_618_000, 7253) - // Standard Error: 13_246_289 - .saturating_add(Weight::from_parts(424_466_486, 0).saturating_mul(v.into())) - // Standard Error: 52_999_911 - .saturating_add(Weight::from_parts(1_664_762_641, 0).saturating_mul(b.into())) + // Minimum execution time: 1_745_807_000 picoseconds. 
+ Weight::from_parts(1_757_648_000, 7253) + // Standard Error: 12_994_693 + .saturating_add(Weight::from_parts(416_410_247, 0).saturating_mul(v.into())) + // Standard Error: 51_993_247 + .saturating_add(Weight::from_parts(1_654_551_441, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -166,8 +164,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:1 w:0) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -176,11 +172,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `2020` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 151_529_000 picoseconds. - Weight::from_parts(155_498_000, 7253) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Minimum execution time: 136_437_000 picoseconds. 
+ Weight::from_parts(138_827_000, 7253) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -197,10 +193,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1383` + // Measured: `1350` // Estimated: `7253` - // Minimum execution time: 55_859_000 picoseconds. - Weight::from_parts(56_949_000, 7253) + // Minimum execution time: 45_337_000 picoseconds. + Weight::from_parts(47_359_000, 7253) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -210,8 +206,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_226_000 picoseconds. - Weight::from_parts(2_356_000, 0) + // Minimum execution time: 2_258_000 picoseconds. + Weight::from_parts(2_406_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -236,8 +232,6 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:64 w:64) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:64 w:64) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:64 w:64) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:64 w:0) @@ -253,16 +247,16 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. 
fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1575 + b * (452 ±0)` + // Measured: `1475 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 99_430_000 picoseconds. - Weight::from_parts(47_845_798, 7253) - // Standard Error: 35_454 - .saturating_add(Weight::from_parts(61_016_013, 0).saturating_mul(b.into())) + // Minimum execution time: 84_536_000 picoseconds. + Weight::from_parts(41_949_894, 7253) + // Standard Error: 28_494 + .saturating_add(Weight::from_parts(52_945_820, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((9_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((6_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -285,14 +279,14 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1912 + b * (55 ±0) + v * (10055 ±0)` + // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` - // Minimum execution time: 1_839_591_000 picoseconds. - Weight::from_parts(1_849_618_000, 7253) - // Standard Error: 13_246_289 - .saturating_add(Weight::from_parts(424_466_486, 0).saturating_mul(v.into())) - // Standard Error: 52_999_911 - .saturating_add(Weight::from_parts(1_664_762_641, 0).saturating_mul(b.into())) + // Minimum execution time: 1_745_807_000 picoseconds. 
+ Weight::from_parts(1_757_648_000, 7253) + // Standard Error: 12_994_693 + .saturating_add(Weight::from_parts(416_410_247, 0).saturating_mul(v.into())) + // Standard Error: 51_993_247 + .saturating_add(Weight::from_parts(1_654_551_441, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -323,8 +317,6 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:1 w:0) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -333,11 +325,11 @@ impl WeightInfo for () { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `2020` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 151_529_000 picoseconds. - Weight::from_parts(155_498_000, 7253) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Minimum execution time: 136_437_000 picoseconds. 
+ Weight::from_parts(138_827_000, 7253) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -354,10 +346,10 @@ impl WeightInfo for () { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1383` + // Measured: `1350` // Estimated: `7253` - // Minimum execution time: 55_859_000 picoseconds. - Weight::from_parts(56_949_000, 7253) + // Minimum execution time: 45_337_000 picoseconds. + Weight::from_parts(47_359_000, 7253) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -367,8 +359,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_226_000 picoseconds. - Weight::from_parts(2_356_000, 0) + // Minimum execution time: 2_258_000 picoseconds. 
+ Weight::from_parts(2_406_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 317a9ea8b760..6717176ffc95 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -18,15 +18,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { workspace = true } codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } -sp-inherents = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-inherents = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/glutton/src/weights.rs b/substrate/frame/glutton/src/weights.rs index 825ab922408f..d9e6ebd9d8a9 100644 --- a/substrate/frame/glutton/src/weights.rs +++ b/substrate/frame/glutton/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_glutton` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,12 +72,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. 
fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `86` // Estimated: `1489` - // Minimum execution time: 9_697_000 picoseconds. - Weight::from_parts(9_901_000, 1489) - // Standard Error: 4_104 - .saturating_add(Weight::from_parts(10_452_607, 0).saturating_mul(n.into())) + // Minimum execution time: 8_453_000 picoseconds. + Weight::from_parts(5_470_386, 1489) + // Standard Error: 4_723 + .saturating_add(Weight::from_parts(10_418_732, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -89,12 +89,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `146` + // Measured: `119` // Estimated: `1489` - // Minimum execution time: 9_630_000 picoseconds. - Weight::from_parts(9_800_000, 1489) - // Standard Error: 1_222 - .saturating_add(Weight::from_parts(1_172_845, 0).saturating_mul(n.into())) + // Minimum execution time: 8_646_000 picoseconds. + Weight::from_parts(7_948_965, 1489) + // Standard Error: 2_154 + .saturating_add(Weight::from_parts(1_197_352, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -104,22 +104,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 666_000 picoseconds. - Weight::from_parts(1_717_806, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(106_571, 0).saturating_mul(i.into())) + // Minimum execution time: 643_000 picoseconds. 
+ Weight::from_parts(4_035_744, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(105_406, 0).saturating_mul(i.into())) } /// Storage: `Glutton::TrashData` (r:5000 w:0) /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119141 + i * (1022 ±0)` + // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 408_000 picoseconds. - Weight::from_parts(389_107_502, 990) - // Standard Error: 8_027 - .saturating_add(Weight::from_parts(7_091_830, 0).saturating_mul(i.into())) + // Minimum execution time: 228_000 picoseconds. + Weight::from_parts(62_060_711, 990) + // Standard Error: 5_638 + .saturating_add(Weight::from_parts(5_970_065, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -131,10 +131,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900524` + // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 58_810_751_000 picoseconds. - Weight::from_parts(59_238_169_000, 5239782) + // Minimum execution time: 57_557_511_000 picoseconds. 
+ Weight::from_parts(57_644_868_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -145,10 +145,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9574` + // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 100_387_946_000 picoseconds. - Weight::from_parts(100_470_819_000, 16070) + // Minimum execution time: 101_362_469_000 picoseconds. + Weight::from_parts(101_583_065_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -157,10 +157,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `86` // Estimated: `1493` - // Minimum execution time: 6_587_000 picoseconds. - Weight::from_parts(6_835_000, 1493) + // Minimum execution time: 5_118_000 picoseconds. + Weight::from_parts(5_320_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Glutton::Compute` (r:0 w:1) @@ -169,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_238_000 picoseconds. - Weight::from_parts(5_466_000, 0) + // Minimum execution time: 5_925_000 picoseconds. + Weight::from_parts(6_193_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Glutton::Storage` (r:0 w:1) @@ -179,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_136_000 picoseconds. - Weight::from_parts(5_437_000, 0) + // Minimum execution time: 5_912_000 picoseconds. 
+ Weight::from_parts(6_170_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -194,12 +194,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `86` // Estimated: `1489` - // Minimum execution time: 9_697_000 picoseconds. - Weight::from_parts(9_901_000, 1489) - // Standard Error: 4_104 - .saturating_add(Weight::from_parts(10_452_607, 0).saturating_mul(n.into())) + // Minimum execution time: 8_453_000 picoseconds. + Weight::from_parts(5_470_386, 1489) + // Standard Error: 4_723 + .saturating_add(Weight::from_parts(10_418_732, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -211,12 +211,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `146` + // Measured: `119` // Estimated: `1489` - // Minimum execution time: 9_630_000 picoseconds. - Weight::from_parts(9_800_000, 1489) - // Standard Error: 1_222 - .saturating_add(Weight::from_parts(1_172_845, 0).saturating_mul(n.into())) + // Minimum execution time: 8_646_000 picoseconds. + Weight::from_parts(7_948_965, 1489) + // Standard Error: 2_154 + .saturating_add(Weight::from_parts(1_197_352, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -226,22 +226,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 666_000 picoseconds. 
- Weight::from_parts(1_717_806, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(106_571, 0).saturating_mul(i.into())) + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(4_035_744, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(105_406, 0).saturating_mul(i.into())) } /// Storage: `Glutton::TrashData` (r:5000 w:0) /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119141 + i * (1022 ±0)` + // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 408_000 picoseconds. - Weight::from_parts(389_107_502, 990) - // Standard Error: 8_027 - .saturating_add(Weight::from_parts(7_091_830, 0).saturating_mul(i.into())) + // Minimum execution time: 228_000 picoseconds. + Weight::from_parts(62_060_711, 990) + // Standard Error: 5_638 + .saturating_add(Weight::from_parts(5_970_065, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -253,10 +253,10 @@ impl WeightInfo for () { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900524` + // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 58_810_751_000 picoseconds. - Weight::from_parts(59_238_169_000, 5239782) + // Minimum execution time: 57_557_511_000 picoseconds. 
+ Weight::from_parts(57_644_868_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -267,10 +267,10 @@ impl WeightInfo for () { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9574` + // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 100_387_946_000 picoseconds. - Weight::from_parts(100_470_819_000, 16070) + // Minimum execution time: 101_362_469_000 picoseconds. + Weight::from_parts(101_583_065_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -279,10 +279,10 @@ impl WeightInfo for () { /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `113` + // Measured: `86` // Estimated: `1493` - // Minimum execution time: 6_587_000 picoseconds. - Weight::from_parts(6_835_000, 1493) + // Minimum execution time: 5_118_000 picoseconds. + Weight::from_parts(5_320_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Glutton::Compute` (r:0 w:1) @@ -291,8 +291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_238_000 picoseconds. - Weight::from_parts(5_466_000, 0) + // Minimum execution time: 5_925_000 picoseconds. + Weight::from_parts(6_193_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Glutton::Storage` (r:0 w:1) @@ -301,8 +301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_136_000 picoseconds. - Weight::from_parts(5_437_000, 0) + // Minimum execution time: 5_912_000 picoseconds. 
+ Weight::from_parts(6_170_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 4072d65b6267..86ace358d05d 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 87369c23948c..cf4c29003a71 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -297,7 +297,7 @@ pub fn start_session(session_index: SessionIndex) { pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); + assert_eq!(Staking::current_era(), Some(era_index)); } pub fn initialize_block(number: u64, parent_hash: H256) { diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 383f77f00de7..e1e963ce564a 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -319,7 +319,7 @@ fn report_equivocation_current_set_works() { let authorities = test_authorities(); new_test_ext_raw_authorities(authorities).execute_with(|| { - assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); + 
assert_eq!(Staking::current_era(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(1); diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 4ea7f797d9ee..bf974221b857 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } enumflags2 = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs index f1ede9213280..a74cca9dc8ec 100644 --- a/substrate/frame/identity/src/weights.rs +++ b/substrate/frame/identity/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_identity` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -69,13 +69,13 @@ pub trait WeightInfo { fn quit_sub(s: u32, ) -> Weight; fn add_username_authority() -> Weight; fn remove_username_authority() -> Weight; - fn set_username_for(p: u32, ) -> Weight; + fn set_username_for(p: u32) -> Weight; fn accept_username() -> Weight; - fn remove_expired_approval(p: u32, ) -> Weight; + fn remove_expired_approval(p: u32) -> Weight; fn set_primary_username() -> Weight; fn unbind_username() -> Weight; fn remove_username() -> Weight; - fn kill_username(p: u32, ) -> Weight; + fn kill_username(p: u32) -> Weight; fn migration_v2_authority_step() -> Weight; fn migration_v2_username_step() -> Weight; fn migration_v2_identity_step() -> Weight; @@ -94,29 +94,29 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 9_510_000 picoseconds. - Weight::from_parts(10_180_808, 2626) - // Standard Error: 1_519 - .saturating_add(Weight::from_parts(97_439, 0).saturating_mul(r.into())) + // Minimum execution time: 8_696_000 picoseconds. + Weight::from_parts(9_620_793, 2626) + // Standard Error: 1_909 + .saturating_add(Weight::from_parts(94_977, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6977 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 121_544_000 picoseconds. 
- Weight::from_parts(123_405_465, 11003) - // Standard Error: 10_028 - .saturating_add(Weight::from_parts(280_726, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 110_950_000 picoseconds. + Weight::from_parts(112_705_139, 11037) + // Standard Error: 6_475 + .saturating_add(Weight::from_parts(212_737, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:100 w:100) @@ -125,11 +125,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 13_867_000 picoseconds. - Weight::from_parts(26_900_535, 11003) - // Standard Error: 5_334 - .saturating_add(Weight::from_parts(3_798_050, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 9_440_000 picoseconds. 
+ Weight::from_parts(23_266_871, 11037) + // Standard Error: 10_640 + .saturating_add(Weight::from_parts(3_663_971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -137,7 +137,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -146,11 +146,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 13_911_000 picoseconds. - Weight::from_parts(31_349_327, 11003) - // Standard Error: 4_045 - .saturating_add(Weight::from_parts(1_503_129, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 9_588_000 picoseconds. 
+ Weight::from_parts(22_403_362, 11037) + // Standard Error: 3_359 + .saturating_add(Weight::from_parts(1_557_280, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -158,21 +158,21 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7069 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 61_520_000 picoseconds. - Weight::from_parts(63_655_763, 11003) - // Standard Error: 12_100 - .saturating_add(Weight::from_parts(174_203, 0).saturating_mul(r.into())) - // Standard Error: 2_361 - .saturating_add(Weight::from_parts(1_480_283, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 55_387_000 picoseconds. 
+ Weight::from_parts(52_575_769, 11037) + // Standard Error: 17_705 + .saturating_add(Weight::from_parts(268_160, 0).saturating_mul(r.into())) + // Standard Error: 3_454 + .saturating_add(Weight::from_parts(1_576_194, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -180,30 +180,30 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6967 + r * (57 ±0)` - // Estimated: `11003` - // Minimum execution time: 85_411_000 picoseconds. - Weight::from_parts(87_137_905, 11003) - // Standard Error: 5_469 - .saturating_add(Weight::from_parts(189_201, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 78_243_000 picoseconds. + Weight::from_parts(80_404_226, 11037) + // Standard Error: 5_153 + .saturating_add(Weight::from_parts(149_799, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6998` - // Estimated: `11003` - // Minimum execution time: 83_034_000 picoseconds. - Weight::from_parts(84_688_145, 11003) - // Standard Error: 4_493 - .saturating_add(Weight::from_parts(126_412, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 73_360_000 picoseconds. + Weight::from_parts(76_216_374, 11037) + // Standard Error: 15_603 + .saturating_add(Weight::from_parts(189_080, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -214,10 +214,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_984_000 picoseconds. - Weight::from_parts(7_653_398, 2626) - // Standard Error: 1_328 - .saturating_add(Weight::from_parts(83_290, 0).saturating_mul(r.into())) + // Minimum execution time: 6_287_000 picoseconds. + Weight::from_parts(6_721_854, 2626) + // Standard Error: 1_488 + .saturating_add(Weight::from_parts(96_288, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -228,10 +228,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 10_608_000 picoseconds. - Weight::from_parts(11_047_553, 2626) - // Standard Error: 1_253 - .saturating_add(Weight::from_parts(76_665, 0).saturating_mul(r.into())) + // Minimum execution time: 6_441_000 picoseconds. 
+ Weight::from_parts(6_864_863, 2626) + // Standard Error: 1_403 + .saturating_add(Weight::from_parts(85_123, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -242,33 +242,33 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 10_291_000 picoseconds. - Weight::from_parts(10_787_424, 2626) - // Standard Error: 1_267 - .saturating_add(Weight::from_parts(88_833, 0).saturating_mul(r.into())) + // Minimum execution time: 6_249_000 picoseconds. + Weight::from_parts(6_658_251, 2626) + // Standard Error: 1_443 + .saturating_add(Weight::from_parts(92_586, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7045 + r * (57 ±0)` - // Estimated: `11003` - // Minimum execution time: 105_178_000 picoseconds. - Weight::from_parts(107_276_823, 11003) - // Standard Error: 7_063 - .saturating_add(Weight::from_parts(149_499, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 97_969_000 picoseconds. 
+ Weight::from_parts(101_366_385, 11037) + // Standard Error: 19_594 + .saturating_add(Weight::from_parts(103_251, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -277,20 +277,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7276 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 76_175_000 picoseconds. - Weight::from_parts(77_692_045, 11003) - // Standard Error: 14_176 - .saturating_add(Weight::from_parts(201_431, 0).saturating_mul(r.into())) - // Standard Error: 2_766 - .saturating_add(Weight::from_parts(1_499_834, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 73_785_000 picoseconds. 
+ Weight::from_parts(73_606_063, 11037) + // Standard Error: 26_433 + .saturating_add(Weight::from_parts(230_018, 0).saturating_mul(r.into())) + // Standard Error: 5_157 + .saturating_add(Weight::from_parts(1_483_326, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -299,32 +299,32 @@ impl WeightInfo for SubstrateWeight { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_756_000 picoseconds. - Weight::from_parts(38_457_195, 11003) - // Standard Error: 2_153 - .saturating_add(Weight::from_parts(114_749, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 27_304_000 picoseconds. 
+ Weight::from_parts(31_677_329, 11037) + // Standard Error: 1_388 + .saturating_add(Weight::from_parts(102_193, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 21_627_000 picoseconds. - Weight::from_parts(24_786_470, 11003) - // Standard Error: 837 - .saturating_add(Weight::from_parts(63_553, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_925_000 picoseconds. 
+ Weight::from_parts(14_756_477, 11037) + // Standard Error: 646 + .saturating_add(Weight::from_parts(36_734, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -333,11 +333,11 @@ impl WeightInfo for SubstrateWeight { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 37_768_000 picoseconds. - Weight::from_parts(41_759_997, 11003) - // Standard Error: 1_157 - .saturating_add(Weight::from_parts(97_679, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 30_475_000 picoseconds. + Weight::from_parts(33_821_774, 11037) + // Standard Error: 1_012 + .saturating_add(Weight::from_parts(87_704, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -352,225 +352,116 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 29_539_000 picoseconds. - Weight::from_parts(31_966_337, 6723) - // Standard Error: 1_076 - .saturating_add(Weight::from_parts(94_311, 0).saturating_mul(s.into())) + // Minimum execution time: 22_841_000 picoseconds. 
+ Weight::from_parts(25_781_412, 6723) + // Standard Error: 1_145 + .saturating_add(Weight::from_parts(84_692, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Identity::AuthorityOf` (r:0 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn add_username_authority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_783_000 picoseconds. - Weight::from_parts(7_098_000, 0) + // Minimum execution time: 6_983_000 picoseconds. + Weight::from_parts(7_388_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AuthorityOf` (r:1 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `79` + // Measured: `80` // Estimated: `3517` - // Minimum execution time: 10_772_000 picoseconds. - Weight::from_parts(11_136_000, 3517) + // Minimum execution time: 9_717_000 picoseconds. 
+ Weight::from_parts(10_322_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AuthorityOf` (r:1 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:1 w:1) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::PendingUsernames` (r:1 w:0) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn set_username_for(_p: u32, ) -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + fn set_username_for(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `181` - // Estimated: `3593` - // Minimum execution time: 68_832_000 picoseconds. 
- Weight::from_parts(91_310_781, 3593) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `80` + // Estimated: `11037` + // Minimum execution time: 70_714_000 picoseconds. + Weight::from_parts(74_990_000, 11037) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:0 w:1) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `116` - // Estimated: `3567` - // Minimum execution time: 21_196_000 picoseconds. - Weight::from_parts(21_755_000, 3567) + // Measured: `115` + // Estimated: `11037` + // Minimum execution time: 21_996_000 picoseconds. 
+ Weight::from_parts(22_611_000, 11037) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn remove_expired_approval(_p: u32, ) -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn remove_expired_approval(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3593` - // Minimum execution time: 19_371_000 picoseconds. - Weight::from_parts(62_390_200, 3593) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_880_000 picoseconds. 
+ Weight::from_parts(28_371_000, 3550) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:0 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:1 w:0) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3563` - // Minimum execution time: 13_890_000 picoseconds. - Weight::from_parts(14_307_000, 3563) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `257` + // Estimated: `11037` + // Minimum execution time: 16_771_000 picoseconds. + Weight::from_parts(17_333_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn unbind_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `236` - // Estimated: `3563` - // Minimum execution time: 22_126_000 picoseconds. 
- Weight::from_parts(23_177_000, 3563) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3563` - // Minimum execution time: 27_513_000 picoseconds. 
- Weight::from_parts(28_389_000, 3563) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + Weight::zero() } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn kill_username(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470` - // Estimated: `3593` - // Minimum execution time: 25_125_000 picoseconds. - Weight::from_parts(55_315_063, 3593) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + fn kill_username(_p: u32) -> Weight { + Weight::zero() } - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) - /// Storage: `Identity::AuthorityOf` (r:0 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn migration_v2_authority_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `147` - // Estimated: `6087` - // Minimum execution time: 9_218_000 picoseconds. 
- Weight::from_parts(9_560_000, 6087) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) - /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) fn migration_v2_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `159` - // Estimated: `6099` - // Minimum execution time: 9_090_000 picoseconds. - Weight::from_parts(9_456_000, 6099) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::IdentityOf` (r:2 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:0 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn migration_v2_identity_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `7062` - // Estimated: `21016` - // Minimum execution time: 64_909_000 picoseconds. - Weight::from_parts(65_805_000, 21016) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + Weight::zero() } - /// Storage: `Identity::PendingUsernames` (r:2 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) fn migration_v2_pending_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `201` - // Estimated: `6144` - // Minimum execution time: 8_518_000 picoseconds. 
- Weight::from_parts(8_933_000, 6144) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::AuthorityOf` (r:2 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) fn migration_v2_cleanup_authority_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `288` - // Estimated: `6044` - // Minimum execution time: 16_108_000 picoseconds. - Weight::from_parts(16_597_000, 6044) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::UsernameInfoOf` (r:2 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) fn migration_v2_cleanup_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `290` - // Estimated: `6136` - // Minimum execution time: 11_336_000 picoseconds. - Weight::from_parts(11_938_000, 6136) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + Weight::zero() } } @@ -583,29 +474,29 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 9_510_000 picoseconds. - Weight::from_parts(10_180_808, 2626) - // Standard Error: 1_519 - .saturating_add(Weight::from_parts(97_439, 0).saturating_mul(r.into())) + // Minimum execution time: 8_696_000 picoseconds. 
+ Weight::from_parts(9_620_793, 2626) + // Standard Error: 1_909 + .saturating_add(Weight::from_parts(94_977, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6977 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 121_544_000 picoseconds. - Weight::from_parts(123_405_465, 11003) - // Standard Error: 10_028 - .saturating_add(Weight::from_parts(280_726, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 110_950_000 picoseconds. + Weight::from_parts(112_705_139, 11037) + // Standard Error: 6_475 + .saturating_add(Weight::from_parts(212_737, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:100 w:100) @@ -614,11 +505,11 @@ impl WeightInfo for () { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 13_867_000 picoseconds. 
- Weight::from_parts(26_900_535, 11003) - // Standard Error: 5_334 - .saturating_add(Weight::from_parts(3_798_050, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 9_440_000 picoseconds. + Weight::from_parts(23_266_871, 11037) + // Standard Error: 10_640 + .saturating_add(Weight::from_parts(3_663_971, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -626,7 +517,7 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -635,11 +526,11 @@ impl WeightInfo for () { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 13_911_000 picoseconds. - Weight::from_parts(31_349_327, 11003) - // Standard Error: 4_045 - .saturating_add(Weight::from_parts(1_503_129, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 9_588_000 picoseconds. 
+ Weight::from_parts(22_403_362, 11037) + // Standard Error: 3_359 + .saturating_add(Weight::from_parts(1_557_280, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -647,21 +538,21 @@ impl WeightInfo for () { /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7069 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 61_520_000 picoseconds. - Weight::from_parts(63_655_763, 11003) - // Standard Error: 12_100 - .saturating_add(Weight::from_parts(174_203, 0).saturating_mul(r.into())) - // Standard Error: 2_361 - .saturating_add(Weight::from_parts(1_480_283, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 55_387_000 picoseconds. 
+ Weight::from_parts(52_575_769, 11037) + // Standard Error: 17_705 + .saturating_add(Weight::from_parts(268_160, 0).saturating_mul(r.into())) + // Standard Error: 3_454 + .saturating_add(Weight::from_parts(1_576_194, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -669,30 +560,30 @@ impl WeightInfo for () { /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6967 + r * (57 ±0)` - // Estimated: `11003` - // Minimum execution time: 85_411_000 picoseconds. - Weight::from_parts(87_137_905, 11003) - // Standard Error: 5_469 - .saturating_add(Weight::from_parts(189_201, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 78_243_000 picoseconds. + Weight::from_parts(80_404_226, 11037) + // Standard Error: 5_153 + .saturating_add(Weight::from_parts(149_799, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6998` - // Estimated: `11003` - // Minimum execution time: 83_034_000 picoseconds. - Weight::from_parts(84_688_145, 11003) - // Standard Error: 4_493 - .saturating_add(Weight::from_parts(126_412, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 73_360_000 picoseconds. + Weight::from_parts(76_216_374, 11037) + // Standard Error: 15_603 + .saturating_add(Weight::from_parts(189_080, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -703,10 +594,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_984_000 picoseconds. - Weight::from_parts(7_653_398, 2626) - // Standard Error: 1_328 - .saturating_add(Weight::from_parts(83_290, 0).saturating_mul(r.into())) + // Minimum execution time: 6_287_000 picoseconds. + Weight::from_parts(6_721_854, 2626) + // Standard Error: 1_488 + .saturating_add(Weight::from_parts(96_288, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -717,10 +608,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 10_608_000 picoseconds. - Weight::from_parts(11_047_553, 2626) - // Standard Error: 1_253 - .saturating_add(Weight::from_parts(76_665, 0).saturating_mul(r.into())) + // Minimum execution time: 6_441_000 picoseconds. 
+ Weight::from_parts(6_864_863, 2626) + // Standard Error: 1_403 + .saturating_add(Weight::from_parts(85_123, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -731,33 +622,33 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 10_291_000 picoseconds. - Weight::from_parts(10_787_424, 2626) - // Standard Error: 1_267 - .saturating_add(Weight::from_parts(88_833, 0).saturating_mul(r.into())) + // Minimum execution time: 6_249_000 picoseconds. + Weight::from_parts(6_658_251, 2626) + // Standard Error: 1_443 + .saturating_add(Weight::from_parts(92_586, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7045 + r * (57 ±0)` - // Estimated: `11003` - // Minimum execution time: 105_178_000 picoseconds. - Weight::from_parts(107_276_823, 11003) - // Standard Error: 7_063 - .saturating_add(Weight::from_parts(149_499, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 97_969_000 picoseconds. 
+ Weight::from_parts(101_366_385, 11037) + // Standard Error: 19_594 + .saturating_add(Weight::from_parts(103_251, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -766,20 +657,20 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7276 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 76_175_000 picoseconds. - Weight::from_parts(77_692_045, 11003) - // Standard Error: 14_176 - .saturating_add(Weight::from_parts(201_431, 0).saturating_mul(r.into())) - // Standard Error: 2_766 - .saturating_add(Weight::from_parts(1_499_834, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 73_785_000 picoseconds. 
+ Weight::from_parts(73_606_063, 11037) + // Standard Error: 26_433 + .saturating_add(Weight::from_parts(230_018, 0).saturating_mul(r.into())) + // Standard Error: 5_157 + .saturating_add(Weight::from_parts(1_483_326, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -788,32 +679,32 @@ impl WeightInfo for () { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_756_000 picoseconds. - Weight::from_parts(38_457_195, 11003) - // Standard Error: 2_153 - .saturating_add(Weight::from_parts(114_749, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 27_304_000 picoseconds. 
+ Weight::from_parts(31_677_329, 11037) + // Standard Error: 1_388 + .saturating_add(Weight::from_parts(102_193, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 21_627_000 picoseconds. - Weight::from_parts(24_786_470, 11003) - // Standard Error: 837 - .saturating_add(Weight::from_parts(63_553, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_925_000 picoseconds. 
+ Weight::from_parts(14_756_477, 11037) + // Standard Error: 646 + .saturating_add(Weight::from_parts(36_734, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -822,11 +713,11 @@ impl WeightInfo for () { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 37_768_000 picoseconds. - Weight::from_parts(41_759_997, 11003) - // Standard Error: 1_157 - .saturating_add(Weight::from_parts(97_679, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 30_475_000 picoseconds. + Weight::from_parts(33_821_774, 11037) + // Standard Error: 1_012 + .saturating_add(Weight::from_parts(87_704, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -841,224 +732,115 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 29_539_000 picoseconds. - Weight::from_parts(31_966_337, 6723) - // Standard Error: 1_076 - .saturating_add(Weight::from_parts(94_311, 0).saturating_mul(s.into())) + // Minimum execution time: 22_841_000 picoseconds. 
+ Weight::from_parts(25_781_412, 6723) + // Standard Error: 1_145 + .saturating_add(Weight::from_parts(84_692, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Identity::AuthorityOf` (r:0 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn add_username_authority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_783_000 picoseconds. - Weight::from_parts(7_098_000, 0) + // Minimum execution time: 6_983_000 picoseconds. + Weight::from_parts(7_388_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AuthorityOf` (r:1 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `79` + // Measured: `80` // Estimated: `3517` - // Minimum execution time: 10_772_000 picoseconds. - Weight::from_parts(11_136_000, 3517) + // Minimum execution time: 9_717_000 picoseconds. 
+ Weight::from_parts(10_322_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AuthorityOf` (r:1 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) + /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:1 w:1) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::PendingUsernames` (r:1 w:0) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn set_username_for(_p: u32, ) -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + fn set_username_for(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `181` - // Estimated: `3593` - // Minimum execution time: 68_832_000 picoseconds. 
- Weight::from_parts(91_310_781, 3593) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `80` + // Estimated: `11037` + // Minimum execution time: 70_714_000 picoseconds. + Weight::from_parts(74_990_000, 11037) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:0 w:1) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `116` - // Estimated: `3567` - // Minimum execution time: 21_196_000 picoseconds. - Weight::from_parts(21_755_000, 3567) + // Measured: `115` + // Estimated: `11037` + // Minimum execution time: 21_996_000 picoseconds. 
+ Weight::from_parts(22_611_000, 11037) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn remove_expired_approval(_p: u32, ) -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + fn remove_expired_approval(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3593` - // Minimum execution time: 19_371_000 picoseconds. - Weight::from_parts(62_390_200, 3593) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_880_000 picoseconds. 
+ Weight::from_parts(28_371_000, 3550) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:0 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::AccountOfUsername` (r:1 w:0) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3563` - // Minimum execution time: 13_890_000 picoseconds. - Weight::from_parts(14_307_000, 3563) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `257` + // Estimated: `11037` + // Minimum execution time: 16_771_000 picoseconds. 
+ Weight::from_parts(17_333_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn unbind_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `236` - // Estimated: `3563` - // Minimum execution time: 22_126_000 picoseconds. - Weight::from_parts(23_177_000, 3563) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3563` - // Minimum execution time: 27_513_000 picoseconds. 
- Weight::from_parts(28_389_000, 3563) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + Weight::zero() } - /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:1 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) - /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) - /// Storage: `Identity::AuthorityOf` (r:1 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 1]`. - fn kill_username(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470` - // Estimated: `3593` - // Minimum execution time: 25_125_000 picoseconds. - Weight::from_parts(55_315_063, 3593) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + fn kill_username(_p: u32) -> Weight { + Weight::zero() } - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) - /// Storage: `Identity::AuthorityOf` (r:0 w:1) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn migration_v2_authority_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `147` - // Estimated: `6087` - // Minimum execution time: 9_218_000 picoseconds. 
- Weight::from_parts(9_560_000, 6087) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) - /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) fn migration_v2_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `159` - // Estimated: `6099` - // Minimum execution time: 9_090_000 picoseconds. - Weight::from_parts(9_456_000, 6099) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::IdentityOf` (r:2 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) - /// Storage: `Identity::UsernameOf` (r:0 w:1) - /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn migration_v2_identity_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `7062` - // Estimated: `21016` - // Minimum execution time: 64_909_000 picoseconds. - Weight::from_parts(65_805_000, 21016) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + Weight::zero() } - /// Storage: `Identity::PendingUsernames` (r:2 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) fn migration_v2_pending_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `201` - // Estimated: `6144` - // Minimum execution time: 8_518_000 picoseconds. 
- Weight::from_parts(8_933_000, 6144) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::AuthorityOf` (r:2 w:0) - /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) fn migration_v2_cleanup_authority_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `288` - // Estimated: `6044` - // Minimum execution time: 16_108_000 picoseconds. - Weight::from_parts(16_597_000, 6044) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } - /// Storage: `Identity::UsernameInfoOf` (r:2 w:0) - /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) fn migration_v2_cleanup_username_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `290` - // Estimated: `6136` - // Minimum execution time: 11_336_000 picoseconds. 
- Weight::from_parts(11_938_000, 6136) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::zero() } } diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 179c4c3ce3b1..6c32c8ae898e 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-authorship = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/im-online/src/weights.rs b/substrate/frame/im-online/src/weights.rs index 6fde451caf9e..105a36fb209f 100644 --- a/substrate/frame/im-online/src/weights.rs +++ b/substrate/frame/im-online/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_im_online` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,10 +72,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 70_883_000 picoseconds. - Weight::from_parts(93_034_812, 321487) - // Standard Error: 811 - .saturating_add(Weight::from_parts(37_349, 0).saturating_mul(k.into())) + // Minimum execution time: 64_011_000 picoseconds. + Weight::from_parts(80_632_380, 321487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(34_921, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) @@ -99,10 +99,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 70_883_000 picoseconds. - Weight::from_parts(93_034_812, 321487) - // Standard Error: 811 - .saturating_add(Weight::from_parts(37_349, 0).saturating_mul(k.into())) + // Minimum execution time: 64_011_000 picoseconds. 
+ Weight::from_parts(80_632_380, 321487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(34_921, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index a0030b5b0edf..d81b2d5cabf1 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keyring = { optional = true, workspace = true } diff --git a/substrate/frame/indices/src/benchmarking.rs b/substrate/frame/indices/src/benchmarking.rs index 28f5e3bf5cf0..bd173815cb34 100644 --- a/substrate/frame/indices/src/benchmarking.rs +++ b/substrate/frame/indices/src/benchmarking.rs @@ -19,31 +19,26 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::*; -use frame_benchmarking::v2::*; +use super::*; +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -const SEED: u32 = 0; +use crate::Pallet as Indices; -#[benchmarks] -mod benchmarks { - use super::*; +const SEED: u32 = 0; - #[benchmark] - fn claim() { +benchmarks! 
{ + claim { let account_index = T::AccountIndex::from(SEED); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), account_index); - + }: _(RawOrigin::Signed(caller.clone()), account_index) + verify { assert_eq!(Accounts::::get(account_index).unwrap().0, caller); } - #[benchmark] - fn transfer() -> Result<(), BenchmarkError> { + transfer { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); @@ -52,33 +47,25 @@ mod benchmarks { let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index - Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index); - + Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index) + verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); - Ok(()) } - #[benchmark] - fn free() -> Result<(), BenchmarkError> { + free { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index - Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), account_index); - + Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + }: _(RawOrigin::Signed(caller.clone()), account_index) + verify { assert_eq!(Accounts::::get(account_index), None); - Ok(()) } - #[benchmark] - fn force_transfer() -> Result<(), BenchmarkError> { + force_transfer { let account_index = T::AccountIndex::from(SEED); // Setup accounts let original: 
T::AccountId = account("original", 0, SEED); @@ -87,32 +74,25 @@ mod benchmarks { let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index - Pallet::::claim(RawOrigin::Signed(original).into(), account_index)?; - - #[extrinsic_call] - _(RawOrigin::Root, recipient_lookup, account_index, false); - + Indices::::claim(RawOrigin::Signed(original).into(), account_index)?; + }: _(RawOrigin::Root, recipient_lookup, account_index, false) + verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); - Ok(()) } - #[benchmark] - fn freeze() -> Result<(), BenchmarkError> { + freeze { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index - Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), account_index); - + Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + }: _(RawOrigin::Signed(caller.clone()), account_index) + verify { assert_eq!(Accounts::::get(account_index).unwrap().2, true); - Ok(()) } // TODO in another PR: lookup and unlookup trait weights (not critical) - impl_benchmark_test_suite!(Pallet, mock::new_test_ext(), mock::Test); + impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/indices/src/weights.rs b/substrate/frame/indices/src/weights.rs index 567e9bab54bd..e1bc90c9b128 100644 --- a/substrate/frame/indices/src/weights.rs +++ b/substrate/frame/indices/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_indices` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,8 +67,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 23_283_000 picoseconds. - Weight::from_parts(24_326_000, 3534) + // Minimum execution time: 22_026_000 picoseconds. + Weight::from_parts(22_522_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -78,10 +78,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `275` // Estimated: `3593` - // Minimum execution time: 40_906_000 picoseconds. - Weight::from_parts(42_117_000, 3593) + // Minimum execution time: 34_160_000 picoseconds. + Weight::from_parts(35_138_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -91,8 +91,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 27_419_000 picoseconds. - Weight::from_parts(28_544_000, 3534) + // Minimum execution time: 23_736_000 picoseconds. + Weight::from_parts(24_247_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 30_098_000 picoseconds. 
- Weight::from_parts(31_368_000, 3593) + // Minimum execution time: 25_810_000 picoseconds. + Weight::from_parts(26_335_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -115,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 30_356_000 picoseconds. - Weight::from_parts(31_036_000, 3534) + // Minimum execution time: 24_502_000 picoseconds. + Weight::from_parts(25_425_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -130,8 +130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 23_283_000 picoseconds. - Weight::from_parts(24_326_000, 3534) + // Minimum execution time: 22_026_000 picoseconds. + Weight::from_parts(22_522_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -141,10 +141,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `275` // Estimated: `3593` - // Minimum execution time: 40_906_000 picoseconds. - Weight::from_parts(42_117_000, 3593) + // Minimum execution time: 34_160_000 picoseconds. + Weight::from_parts(35_138_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -154,8 +154,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 27_419_000 picoseconds. - Weight::from_parts(28_544_000, 3534) + // Minimum execution time: 23_736_000 picoseconds. 
+ Weight::from_parts(24_247_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -167,8 +167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 30_098_000 picoseconds. - Weight::from_parts(31_368_000, 3593) + // Minimum execution time: 25_810_000 picoseconds. + Weight::from_parts(26_335_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -178,8 +178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 30_356_000 picoseconds. - Weight::from_parts(31_036_000, 3534) + // Minimum execution time: 24_502_000 picoseconds. + Weight::from_parts(25_425_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index 1682b52dfbf4..1a47030812da 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } safe-mix = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index 23eb19c7ffa7..eb6e0b703d08 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], 
workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/lottery/src/weights.rs b/substrate/frame/lottery/src/weights.rs index cac6136a9ba9..0ab7f64509cd 100644 --- a/substrate/frame/lottery/src/weights.rs +++ b/substrate/frame/lottery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_lottery` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -82,10 +82,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `526` + // Measured: `492` // Estimated: `3997` - // Minimum execution time: 67_624_000 picoseconds. - Weight::from_parts(69_671_000, 3997) + // Minimum execution time: 60_979_000 picoseconds. + Weight::from_parts(63_452_000, 3997) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -96,10 +96,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_828_000 picoseconds. 
- Weight::from_parts(5_618_456, 0) - // Standard Error: 3_095 - .saturating_add(Weight::from_parts(367_041, 0).saturating_mul(n.into())) + // Minimum execution time: 5_245_000 picoseconds. + Weight::from_parts(6_113_777, 0) + // Standard Error: 3_280 + .saturating_add(Weight::from_parts(349_366, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Lottery::Lottery` (r:1 w:1) @@ -110,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `181` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 29_189_000 picoseconds. - Weight::from_parts(29_952_000, 3593) + // Minimum execution time: 29_131_000 picoseconds. + Weight::from_parts(29_722_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -123,8 +123,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_805_000, 1514) + // Minimum execution time: 6_413_000 picoseconds. + Weight::from_parts(6_702_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `677` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 72_030_000 picoseconds. - Weight::from_parts(73_116_000, 6196) + // Minimum execution time: 65_913_000 picoseconds. 
+ Weight::from_parts(66_864_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -161,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `677` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 73_263_000 picoseconds. - Weight::from_parts(74_616_000, 6196) + // Minimum execution time: 66_950_000 picoseconds. + Weight::from_parts(68_405_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -192,10 +192,10 @@ impl WeightInfo for () { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `526` + // Measured: `492` // Estimated: `3997` - // Minimum execution time: 67_624_000 picoseconds. - Weight::from_parts(69_671_000, 3997) + // Minimum execution time: 60_979_000 picoseconds. + Weight::from_parts(63_452_000, 3997) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -206,10 +206,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_828_000 picoseconds. - Weight::from_parts(5_618_456, 0) - // Standard Error: 3_095 - .saturating_add(Weight::from_parts(367_041, 0).saturating_mul(n.into())) + // Minimum execution time: 5_245_000 picoseconds. 
+ Weight::from_parts(6_113_777, 0) + // Standard Error: 3_280 + .saturating_add(Weight::from_parts(349_366, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Lottery::Lottery` (r:1 w:1) @@ -220,10 +220,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `181` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 29_189_000 picoseconds. - Weight::from_parts(29_952_000, 3593) + // Minimum execution time: 29_131_000 picoseconds. + Weight::from_parts(29_722_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_805_000, 1514) + // Minimum execution time: 6_413_000 picoseconds. + Weight::from_parts(6_702_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -250,10 +250,10 @@ impl WeightInfo for () { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `677` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 72_030_000 picoseconds. - Weight::from_parts(73_116_000, 6196) + // Minimum execution time: 65_913_000 picoseconds. 
+ Weight::from_parts(66_864_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -271,10 +271,10 @@ impl WeightInfo for () { /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `677` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 73_263_000 picoseconds. - Weight::from_parts(74_616_000, 6196) + // Minimum execution time: 66_950_000 picoseconds. + Weight::from_parts(68_405_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 738d09b4b354..67aa3503ac0a 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } diff --git a/substrate/frame/membership/src/benchmarking.rs b/substrate/frame/membership/src/benchmarking.rs index d752abaae866..515be7eb5386 100644 --- a/substrate/frame/membership/src/benchmarking.rs +++ b/substrate/frame/membership/src/benchmarking.rs @@ -99,7 +99,7 @@ benchmarks_instance_pallet! 
{ assert!(!Members::::get().contains(&remove)); assert!(Members::::get().contains(&add)); // prime is rejigged - assert!(Prime::::get().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::mock::clean(); } @@ -119,7 +119,7 @@ benchmarks_instance_pallet! { new_members.sort(); assert_eq!(Members::::get(), new_members); // prime is rejigged - assert!(Prime::::get().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::mock::clean(); } @@ -157,6 +157,7 @@ benchmarks_instance_pallet! { )); } verify { assert!(Prime::::get().is_some()); + assert!(::get_prime().is_some()); #[cfg(test)] crate::mock::clean(); } diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 2185319676c5..10e9c9afa582 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_membership` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -76,10 +76,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 17_738_000 picoseconds. 
- Weight::from_parts(18_805_035, 4687) - // Standard Error: 796 - .saturating_add(Weight::from_parts(26_172, 0).saturating_mul(m.into())) + // Minimum execution time: 12_827_000 picoseconds. + Weight::from_parts(13_743_651, 4687) + // Standard Error: 622 + .saturating_add(Weight::from_parts(35_417, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -99,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_462_000 picoseconds. - Weight::from_parts(21_560_127, 4687) - // Standard Error: 581 - .saturating_add(Weight::from_parts(18_475, 0).saturating_mul(m.into())) + // Minimum execution time: 15_197_000 picoseconds. + Weight::from_parts(16_172_409, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(35_790, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -122,10 +122,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_345_000 picoseconds. - Weight::from_parts(21_400_566, 4687) - // Standard Error: 711 - .saturating_add(Weight::from_parts(39_733, 0).saturating_mul(m.into())) + // Minimum execution time: 15_558_000 picoseconds. 
+ Weight::from_parts(16_370_827, 4687) + // Standard Error: 603 + .saturating_add(Weight::from_parts(45_739, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -145,10 +145,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_149_000 picoseconds. - Weight::from_parts(21_579_056, 4687) - // Standard Error: 693 - .saturating_add(Weight::from_parts(121_676, 0).saturating_mul(m.into())) + // Minimum execution time: 15_086_000 picoseconds. + Weight::from_parts(16_444_101, 4687) + // Standard Error: 967 + .saturating_add(Weight::from_parts(143_947, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -168,10 +168,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_033_000 picoseconds. - Weight::from_parts(21_867_983, 4687) - // Standard Error: 1_003 - .saturating_add(Weight::from_parts(44_414, 0).saturating_mul(m.into())) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(17_269_755, 4687) + // Standard Error: 660 + .saturating_add(Weight::from_parts(42_082, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -187,10 +187,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 6_849_000 picoseconds. 
- Weight::from_parts(7_199_679, 4687) - // Standard Error: 199 - .saturating_add(Weight::from_parts(9_242, 0).saturating_mul(m.into())) + // Minimum execution time: 5_937_000 picoseconds. + Weight::from_parts(6_501_085, 4687) + // Standard Error: 323 + .saturating_add(Weight::from_parts(18_285, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -203,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_297_000 picoseconds. - Weight::from_parts(2_540_000, 0) + // Minimum execution time: 2_533_000 picoseconds. + Weight::from_parts(2_807_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -224,10 +224,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 17_738_000 picoseconds. - Weight::from_parts(18_805_035, 4687) - // Standard Error: 796 - .saturating_add(Weight::from_parts(26_172, 0).saturating_mul(m.into())) + // Minimum execution time: 12_827_000 picoseconds. + Weight::from_parts(13_743_651, 4687) + // Standard Error: 622 + .saturating_add(Weight::from_parts(35_417, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -247,10 +247,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_462_000 picoseconds. - Weight::from_parts(21_560_127, 4687) - // Standard Error: 581 - .saturating_add(Weight::from_parts(18_475, 0).saturating_mul(m.into())) + // Minimum execution time: 15_197_000 picoseconds. 
+ Weight::from_parts(16_172_409, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(35_790, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -270,10 +270,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_345_000 picoseconds. - Weight::from_parts(21_400_566, 4687) - // Standard Error: 711 - .saturating_add(Weight::from_parts(39_733, 0).saturating_mul(m.into())) + // Minimum execution time: 15_558_000 picoseconds. + Weight::from_parts(16_370_827, 4687) + // Standard Error: 603 + .saturating_add(Weight::from_parts(45_739, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -293,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_149_000 picoseconds. - Weight::from_parts(21_579_056, 4687) - // Standard Error: 693 - .saturating_add(Weight::from_parts(121_676, 0).saturating_mul(m.into())) + // Minimum execution time: 15_086_000 picoseconds. + Weight::from_parts(16_444_101, 4687) + // Standard Error: 967 + .saturating_add(Weight::from_parts(143_947, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -316,10 +316,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 21_033_000 picoseconds. 
- Weight::from_parts(21_867_983, 4687) - // Standard Error: 1_003 - .saturating_add(Weight::from_parts(44_414, 0).saturating_mul(m.into())) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(17_269_755, 4687) + // Standard Error: 660 + .saturating_add(Weight::from_parts(42_082, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -335,10 +335,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 6_849_000 picoseconds. - Weight::from_parts(7_199_679, 4687) - // Standard Error: 199 - .saturating_add(Weight::from_parts(9_242, 0).saturating_mul(m.into())) + // Minimum execution time: 5_937_000 picoseconds. + Weight::from_parts(6_501_085, 4687) + // Standard Error: 323 + .saturating_add(Weight::from_parts(18_285, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -351,8 +351,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_297_000 picoseconds. - Weight::from_parts(2_540_000, 0) + // Minimum execution time: 2_533_000 picoseconds. 
+ Weight::from_parts(2_807_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 04f5ab64100d..4daa394a82d7 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-mmr-primitives = { workspace = true } @@ -28,8 +28,8 @@ sp-runtime = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -itertools = { workspace = true } sp-tracing = { workspace = true, default-features = true } +itertools = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 7b0de7c1e4ff..a6de61d70abf 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -environmental = { workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +log = { workspace = true } +environmental = { workspace = true } -sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } sp-weights = { workspace = true } frame-benchmarking = { 
optional = true, workspace = true } @@ -29,10 +29,10 @@ frame-support = { workspace = true } frame-system = { workspace = true } [dev-dependencies] -rand = { workspace = true, default-features = true } -rand_distr = { workspace = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +rand_distr = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/message-queue/src/weights.rs b/substrate/frame/message-queue/src/weights.rs index 7d36cb755106..46fd52194bf2 100644 --- a/substrate/frame/message-queue/src/weights.rs +++ b/substrate/frame/message-queue/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_message_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -74,8 +74,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 17_093_000 picoseconds. - Weight::from_parts(17_612_000, 6038) + // Minimum execution time: 11_674_000 picoseconds. + Weight::from_parts(12_105_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 15_482_000 picoseconds. 
- Weight::from_parts(16_159_000, 6038) + // Minimum execution time: 10_262_000 picoseconds. + Weight::from_parts(10_654_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_911_000 picoseconds. - Weight::from_parts(5_177_000, 3514) + // Minimum execution time: 4_363_000 picoseconds. + Weight::from_parts(4_589_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 7_108_000 picoseconds. - Weight::from_parts(7_477_000, 69049) + // Minimum execution time: 6_220_000 picoseconds. + Weight::from_parts(6_622_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -120,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_669_000, 69049) + // Minimum execution time: 6_342_000 picoseconds. + Weight::from_parts(6_727_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,8 +133,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_331_000 picoseconds. - Weight::from_parts(174_170_000, 0) + // Minimum execution time: 112_729_000 picoseconds. 
+ Weight::from_parts(114_076_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246` // Estimated: `3514` - // Minimum execution time: 11_817_000 picoseconds. - Weight::from_parts(12_351_000, 3514) + // Minimum execution time: 6_836_000 picoseconds. + Weight::from_parts(6_986_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,8 +158,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 60_883_000 picoseconds. - Weight::from_parts(62_584_000, 69049) + // Minimum execution time: 50_733_000 picoseconds. + Weight::from_parts(51_649_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -171,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 77_569_000 picoseconds. - Weight::from_parts(79_165_000, 69049) + // Minimum execution time: 67_335_000 picoseconds. + Weight::from_parts(68_347_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -184,8 +184,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 120_786_000 picoseconds. - Weight::from_parts(122_457_000, 69049) + // Minimum execution time: 77_610_000 picoseconds. + Weight::from_parts(80_338_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -201,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 17_093_000 picoseconds. 
- Weight::from_parts(17_612_000, 6038) + // Minimum execution time: 11_674_000 picoseconds. + Weight::from_parts(12_105_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -214,8 +214,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 15_482_000 picoseconds. - Weight::from_parts(16_159_000, 6038) + // Minimum execution time: 10_262_000 picoseconds. + Weight::from_parts(10_654_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -225,8 +225,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_911_000 picoseconds. - Weight::from_parts(5_177_000, 3514) + // Minimum execution time: 4_363_000 picoseconds. + Weight::from_parts(4_589_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 7_108_000 picoseconds. - Weight::from_parts(7_477_000, 69049) + // Minimum execution time: 6_220_000 picoseconds. + Weight::from_parts(6_622_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -247,8 +247,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_669_000, 69049) + // Minimum execution time: 6_342_000 picoseconds. 
+ Weight::from_parts(6_727_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -260,8 +260,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_331_000 picoseconds. - Weight::from_parts(174_170_000, 0) + // Minimum execution time: 112_729_000 picoseconds. + Weight::from_parts(114_076_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -272,8 +272,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246` // Estimated: `3514` - // Minimum execution time: 11_817_000 picoseconds. - Weight::from_parts(12_351_000, 3514) + // Minimum execution time: 6_836_000 picoseconds. + Weight::from_parts(6_986_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -285,8 +285,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 60_883_000 picoseconds. - Weight::from_parts(62_584_000, 69049) + // Minimum execution time: 50_733_000 picoseconds. + Weight::from_parts(51_649_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -298,8 +298,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 77_569_000 picoseconds. - Weight::from_parts(79_165_000, 69049) + // Minimum execution time: 67_335_000 picoseconds. + Weight::from_parts(68_347_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -311,8 +311,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 120_786_000 picoseconds. 
- Weight::from_parts(122_457_000, 69049) + // Minimum execution time: 77_610_000 picoseconds. + Weight::from_parts(80_338_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index c7a417795ffe..bca2c3ffb198 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -11,22 +11,22 @@ description = "FRAME signed extension for verifying the metadata hash" [dependencies] array-bytes = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } -const-hex = { workspace = true } -docify = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -sp-runtime = { features = ["serde"], workspace = true } +docify = { workspace = true } +const-hex = { workspace = true } [dev-dependencies] -frame-metadata = { features = ["current", "unstable"], workspace = true, default-features = true } -merkleized-metadata = { workspace = true } +substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } sp-api = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } -substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } +merkleized-metadata = { workspace = true } +frame-metadata = { features = ["current"], workspace = true, default-features = true } 
+sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 469592780beb..a32e48e65280 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -11,8 +11,8 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -cfg-if = { workspace = true } codec = { features = ["derive"], workspace = true } +cfg-if = { workspace = true } docify = { workspace = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true, default-features = true } diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs index 49ae379dba02..6f5ac9715376 100644 --- a/substrate/frame/migrations/src/weights.rs +++ b/substrate/frame/migrations/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_migrations` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -74,10 +74,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `276` // Estimated: `67035` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(9_934_000, 67035) + // Minimum execution time: 7_762_000 picoseconds. 
+ Weight::from_parts(8_100_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 2_993_000 picoseconds. - Weight::from_parts(3_088_000, 67035) + // Minimum execution time: 2_077_000 picoseconds. + Weight::from_parts(2_138_000, 67035) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -97,10 +97,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_completed() -> Weight { // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 7_042_000 picoseconds. - Weight::from_parts(7_272_000, 3632) + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -110,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `3828` - // Minimum execution time: 16_522_000 picoseconds. - Weight::from_parts(17_082_000, 3828) + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. 
+ Weight::from_parts(10_964_000, 3795) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -122,10 +122,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 12_445_000 picoseconds. - Weight::from_parts(12_797_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. + Weight::from_parts(10_396_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -134,10 +134,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 14_057_000 picoseconds. - Weight::from_parts(14_254_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 14_578_000 picoseconds. - Weight::from_parts(14_825_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. 
+ Weight::from_parts(12_910_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -160,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 169_000 picoseconds. - Weight::from_parts(197_000, 0) + // Minimum execution time: 166_000 picoseconds. + Weight::from_parts(193_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -169,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_634_000 picoseconds. - Weight::from_parts(2_798_000, 0) + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -179,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_069_000 picoseconds. - Weight::from_parts(3_293_000, 0) + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -189,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `284` + // Measured: `251` // Estimated: `67035` - // Minimum execution time: 7_674_000 picoseconds. - Weight::from_parts(8_000_000, 67035) + // Minimum execution time: 5_901_000 picoseconds. 
+ Weight::from_parts(6_320_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -202,10 +202,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1122 + n * (271 ±0)` // Estimated: `3834 + n * (2740 ±0)` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(15_713_121, 3834) - // Standard Error: 2_580 - .saturating_add(Weight::from_parts(1_424_239, 0).saturating_mul(n.into())) + // Minimum execution time: 15_952_000 picoseconds. + Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -221,10 +221,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `276` // Estimated: `67035` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(9_934_000, 67035) + // Minimum execution time: 7_762_000 picoseconds. + Weight::from_parts(8_100_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -234,8 +234,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 2_993_000 picoseconds. - Weight::from_parts(3_088_000, 67035) + // Minimum execution time: 2_077_000 picoseconds. 
+ Weight::from_parts(2_138_000, 67035) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -244,10 +244,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_completed() -> Weight { // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 7_042_000 picoseconds. - Weight::from_parts(7_272_000, 3632) + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -257,10 +257,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `3828` - // Minimum execution time: 16_522_000 picoseconds. - Weight::from_parts(17_082_000, 3828) + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. + Weight::from_parts(10_964_000, 3795) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -269,10 +269,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 12_445_000 picoseconds. - Weight::from_parts(12_797_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. 
+ Weight::from_parts(10_396_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -281,10 +281,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 14_057_000 picoseconds. - Weight::from_parts(14_254_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -296,10 +296,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `309` - // Estimated: `3774` - // Minimum execution time: 14_578_000 picoseconds. - Weight::from_parts(14_825_000, 3774) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. + Weight::from_parts(12_910_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,8 +307,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 169_000 picoseconds. - Weight::from_parts(197_000, 0) + // Minimum execution time: 166_000 picoseconds. 
+ Weight::from_parts(193_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -316,8 +316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_634_000 picoseconds. - Weight::from_parts(2_798_000, 0) + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -326,8 +326,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_069_000 picoseconds. - Weight::from_parts(3_293_000, 0) + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -336,10 +336,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `284` + // Measured: `251` // Estimated: `67035` - // Minimum execution time: 7_674_000 picoseconds. - Weight::from_parts(8_000_000, 67035) + // Minimum execution time: 5_901_000 picoseconds. + Weight::from_parts(6_320_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -349,10 +349,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1122 + n * (271 ±0)` // Estimated: `3834 + n * (2740 ±0)` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(15_713_121, 3834) - // Standard Error: 2_580 - .saturating_add(Weight::from_parts(1_424_239, 0).saturating_mul(n.into())) + // Minimum execution time: 15_952_000 picoseconds. 
+ Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 0ae3b3938c60..bb5e84864566 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -17,24 +17,42 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -frame = { workspace = true, features = ["experimental", "runtime"] } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } sp-mixnet = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", "log/std", "scale-info/std", "serde/std", "sp-application-crypto/std", + "sp-arithmetic/std", + "sp-io/std", "sp-mixnet/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", ] diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index 984981817676..6579ed678ae7 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ 
b/substrate/frame/mixnet/src/lib.rs @@ -23,23 +23,28 @@ extern crate alloc; -pub use pallet::*; - use alloc::vec::Vec; +use codec::{Decode, Encode, MaxEncodedLen}; use core::cmp::Ordering; -use frame::{ - deps::{ - sp_io::{self, MultiRemovalResults}, - sp_runtime, - }, - prelude::*, +use frame_support::{ + traits::{EstimateNextSessionRotation, Get, OneSessionHandler}, + BoundedVec, }; +use frame_system::{ + offchain::{CreateInherent, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; +pub use pallet::*; +use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_application_crypto::RuntimeAppPublic; +use sp_arithmetic::traits::{CheckedSub, Saturating, UniqueSaturatedInto, Zero}; +use sp_io::MultiRemovalResults; use sp_mixnet::types::{ AuthorityId, AuthoritySignature, KxPublic, Mixnode, MixnodesErr, PeerId, SessionIndex, SessionPhase, SessionStatus, KX_PUBLIC_SIZE, }; +use sp_runtime::RuntimeDebug; const LOG_TARGET: &str = "runtime::mixnet"; @@ -163,9 +168,12 @@ fn twox>( // The pallet //////////////////////////////////////////////////////////////////////////////// -#[frame::pallet(dev_mode)] +#[frame_support::pallet(dev_mode)] pub mod pallet { use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + #[pallet::pallet] pub struct Pallet(_); @@ -246,7 +254,7 @@ pub mod pallet { StorageDoubleMap<_, Identity, SessionIndex, Identity, AuthorityIndex, BoundedMixnodeFor>; #[pallet::genesis_config] - #[derive(DefaultNoBound)] + #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { /// The mixnode set for the very first session. 
pub mixnodes: BoundedVec, T::MaxAuthorities>, @@ -300,7 +308,7 @@ pub mod pallet { fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { let Self::Call::register { registration, signature } = call else { - return InvalidTransaction::Call.into(); + return InvalidTransaction::Call.into() }; // Check session index matches @@ -312,16 +320,16 @@ pub mod pallet { // Check authority index is valid if registration.authority_index >= T::MaxAuthorities::get() { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } let Some(authority_id) = NextAuthorityIds::::get(registration.authority_index) else { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() }; // Check the authority hasn't registered a mixnode yet if Self::already_registered(registration.session_index, registration.authority_index) { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // Check signature. 
Note that we don't use regular signed transactions for registration @@ -331,7 +339,7 @@ pub mod pallet { authority_id.verify(&encoded_registration, signature) }); if !signature_ok { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } ValidTransaction::with_tag_prefix("MixnetRegistration") @@ -360,12 +368,12 @@ impl Pallet { .saturating_sub(CurrentSessionStartBlock::::get()); let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumCoverToCurrentBlocks::get()) else { - return SessionPhase::CoverToCurrent; + return SessionPhase::CoverToCurrent }; let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumRequestsToCurrentBlocks::get()) else { - return SessionPhase::RequestsToCurrent; + return SessionPhase::RequestsToCurrent }; if block_in_phase < T::NumCoverToPrevBlocks::get() { SessionPhase::CoverToPrev @@ -403,7 +411,7 @@ impl Pallet { return Err(MixnodesErr::InsufficientRegistrations { num: 0, min: T::MinMixnodes::get(), - }); + }) }; Self::mixnodes(prev_session_index) } @@ -422,7 +430,7 @@ impl Pallet { // registering let block_in_session = block_number.saturating_sub(CurrentSessionStartBlock::::get()); if block_in_session < T::NumRegisterStartSlackBlocks::get() { - return false; + return false } let (Some(end_block), _weight) = @@ -430,7 +438,7 @@ impl Pallet { else { // Things aren't going to work terribly well in this case as all the authorities will // just pile in after the slack period... - return true; + return true }; let remaining_blocks = end_block @@ -439,7 +447,7 @@ impl Pallet { if remaining_blocks.is_zero() { // Into the slack time at the end of the session. Not necessarily too late; // registrations are accepted right up until the session ends. 
- return true; + return true } // Want uniform distribution over the remaining blocks, so pick this block with probability @@ -488,7 +496,7 @@ impl Pallet { "Session {session_index} registration attempted, \ but current session is {current_session_index}", ); - return false; + return false } let block_number = frame_system::Pallet::::block_number(); @@ -497,7 +505,7 @@ impl Pallet { target: LOG_TARGET, "Waiting for the session to progress further before registering", ); - return false; + return false } let Some((authority_index, authority_id)) = Self::next_local_authority() else { @@ -505,7 +513,7 @@ impl Pallet { target: LOG_TARGET, "Not an authority in the next session; cannot register a mixnode", ); - return false; + return false }; if Self::already_registered(session_index, authority_index) { @@ -513,14 +521,14 @@ impl Pallet { target: LOG_TARGET, "Already registered a mixnode for the next session", ); - return false; + return false } let registration = Registration { block_number, session_index, authority_index, mixnode: mixnode.into() }; let Some(signature) = authority_id.sign(®istration.encode()) else { log::debug!(target: LOG_TARGET, "Failed to sign registration"); - return false; + return false }; let call = Call::register { registration, signature }; let xt = T::create_inherent(call.into()); diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 0d175617c9c2..c96be908faef 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } # third party log = { workspace = true } diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index 869b4adc2adc..4a30b5c119b9 100644 
--- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -77,9 +77,6 @@ macro_rules! log { type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type BlockNumberFor = - <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; - /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. @@ -156,9 +153,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: weights::WeightInfo; - - /// Provider for the block number. Normally this is the `frame_system` pallet. - type BlockNumberProvider: BlockNumberProvider; } /// The in-code storage version. @@ -241,7 +235,7 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet {} + impl Hooks> for Pallet {} #[pallet::call] impl Pallet { @@ -632,7 +626,7 @@ impl Pallet { /// The current `Timepoint`. 
pub fn timepoint() -> Timepoint> { Timepoint { - height: T::BlockNumberProvider::current_block_number(), + height: >::block_number(), index: >::extrinsic_index().unwrap_or_default(), } } diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index 4065ce73f905..c5a98845270c 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -66,7 +66,6 @@ impl Config for Test { type DepositFactor = ConstU64<1>; type MaxSignatories = ConstU32<3>; type WeightInfo = (); - type BlockNumberProvider = frame_system::Pallet; } use pallet_balances::Call as BalancesCall; diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index 5c14922e0ef0..fb263116ea62 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -294,4 +294,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file +} diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index 7f6df86ed0e5..6a064204b895 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-assets = { workspace = true } pallet-nfts = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/nft-fractionalization/src/benchmarking.rs b/substrate/frame/nft-fractionalization/src/benchmarking.rs index 
433019280f20..811b5fe1b317 100644 --- a/substrate/frame/nft-fractionalization/src/benchmarking.rs +++ b/substrate/frame/nft-fractionalization/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{ assert_ok, traits::{ @@ -77,37 +77,20 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -#[benchmarks( - where - T::Nfts: - Create< - T::AccountId, - CollectionConfig, - frame_system::pallet_prelude::BlockNumberFor::, - T::NftCollectionId> - > - + Mutate, -)] -mod benchmarks { - use super::*; - - #[benchmark] - fn fractionalize() { +benchmarks! { + where_clause { + where + T::Nfts: Create, frame_system::pallet_prelude::BlockNumberFor::, T::NftCollectionId>> + + Mutate, + } + + fractionalize { let asset = T::BenchmarkHelper::asset(0); let collection = T::BenchmarkHelper::collection(0); let nft = T::BenchmarkHelper::nft(0); let (caller, caller_lookup) = mint_nft::(nft); - - #[extrinsic_call] - _( - SystemOrigin::Signed(caller.clone()), - collection, - nft, - asset.clone(), - caller_lookup, - 1000u32.into(), - ); - + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup, 1000u32.into()) + verify { assert_last_event::( Event::NftFractionalized { nft_collection: collection, @@ -115,39 +98,34 @@ mod benchmarks { fractions: 1000u32.into(), asset, beneficiary: caller, - } - .into(), + }.into() ); } - #[benchmark] - fn unify() { + unify { let asset = T::BenchmarkHelper::asset(0); let collection = T::BenchmarkHelper::collection(0); let nft = T::BenchmarkHelper::nft(0); let (caller, caller_lookup) = mint_nft::(nft); - - assert_ok!(NftFractionalization::::fractionalize( + NftFractionalization::::fractionalize( SystemOrigin::Signed(caller.clone()).into(), collection, nft, asset.clone(), caller_lookup.clone(), 1000u32.into(), - )); - - #[extrinsic_call] - 
_(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup); - + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup) + verify { assert_last_event::( - Event::NftUnified { nft_collection: collection, nft, asset, beneficiary: caller } - .into(), + Event::NftUnified { + nft_collection: collection, + nft, + asset, + beneficiary: caller, + }.into() ); } - impl_benchmark_test_suite!( - NftFractionalization, - crate::mock::new_test_ext(), - crate::mock::Test - ); + impl_benchmark_test_suite!(NftFractionalization, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 762c1776e30f..50b41b5fc64e 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -115,7 +115,6 @@ impl pallet_nfts::Config for Test { type OffchainSignature = Signature; type OffchainPublic = AccountPublic; type WeightInfo = (); - type BlockNumberProvider = frame_system::Pallet; pallet_nfts::runtime_benchmarks_enabled! { type Helper = (); } diff --git a/substrate/frame/nft-fractionalization/src/weights.rs b/substrate/frame/nft-fractionalization/src/weights.rs index a55d01eb4f2d..bee6484d856e 100644 --- a/substrate/frame/nft-fractionalization/src/weights.rs +++ b/substrate/frame/nft-fractionalization/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nft_fractionalization` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -61,15 +61,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nfts::Item` (r:1 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nfts::Attribute` (r:1 w:1) /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) /// Storage: `Nfts::Collection` (r:1 w:1) /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -80,11 +78,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: - // Measured: `661` + // Measured: `609` // Estimated: `4326` - // Minimum execution time: 186_614_000 picoseconds. - Weight::from_parts(192_990_000, 4326) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Minimum execution time: 174_545_000 picoseconds. 
+ Weight::from_parts(177_765_000, 4326) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) @@ -104,7 +102,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nfts::Item` (r:1 w:1) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nfts::Account` (r:0 w:1) /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) @@ -115,8 +113,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 140_234_000 picoseconds. - Weight::from_parts(144_124_000, 4326) + // Minimum execution time: 128_211_000 picoseconds. 
+ Weight::from_parts(131_545_000, 4326) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -127,15 +125,13 @@ impl WeightInfo for () { /// Storage: `Nfts::Item` (r:1 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nfts::Attribute` (r:1 w:1) /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) /// Storage: `Nfts::Collection` (r:1 w:1) /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::NextAssetId` (r:1 w:0) - /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -146,11 +142,11 @@ impl WeightInfo for () { /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: - // Measured: `661` + // Measured: `609` // Estimated: `4326` - // Minimum execution time: 186_614_000 picoseconds. - Weight::from_parts(192_990_000, 4326) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Minimum execution time: 174_545_000 picoseconds. 
+ Weight::from_parts(177_765_000, 4326) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) @@ -170,7 +166,7 @@ impl WeightInfo for () { /// Storage: `Nfts::Item` (r:1 w:1) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nfts::Account` (r:0 w:1) /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) @@ -181,8 +177,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 140_234_000 picoseconds. - Weight::from_parts(144_124_000, 4326) + // Minimum execution time: 128_211_000 picoseconds. 
+ Weight::from_parts(131_545_000, 4326) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 18895018e1c5..a97b49e56524 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } enumflags2 = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index 81828be5fa09..bc81096b459d 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{EnsureOrigin, Get, UnfilteredDispatchable}, BoundedVec, }; -use frame_system::RawOrigin as SystemOrigin; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; use sp_runtime::traits::{Bounded, One}; use crate::Pallet as Nfts; @@ -577,7 +577,7 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); @@ -589,7 +589,7 @@ benchmarks_instance_pallet! 
{ let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { @@ -602,7 +602,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { @@ -712,10 +712,10 @@ benchmarks_instance_pallet! { let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; let duration = T::MaxDeadlineDuration::get(); - T::BlockNumberProvider::set_block_number(One::one()); + frame_system::Pallet::::set_block_number(One::one()); }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) verify { - let current_block = T::BlockNumberProvider::current_block_number(); + let current_block = frame_system::Pallet::::block_number(); assert_last_event::(Event::SwapCreated { offered_collection: collection, offered_item: item1, @@ -735,7 +735,7 @@ benchmarks_instance_pallet! 
{ let duration = T::MaxDeadlineDuration::get(); let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; - T::BlockNumberProvider::set_block_number(One::one()); + frame_system::Pallet::::set_block_number(One::one()); Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; }: _(SystemOrigin::Signed(caller.clone()), collection, item1) verify { @@ -761,7 +761,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(caller.clone()); - T::BlockNumberProvider::set_block_number(One::one()); + frame_system::Pallet::::set_block_number(One::one()); Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; Nfts::::create_swap( origin.clone().into(), @@ -774,7 +774,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) verify { - let current_block = T::BlockNumberProvider::current_block_number(); + let current_block = frame_system::Pallet::::block_number(); assert_last_event::(Event::SwapClaimed { sent_collection: collection, sent_item: item2, @@ -822,7 +822,7 @@ benchmarks_instance_pallet! { let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); - T::BlockNumberProvider::set_block_number(One::one()); + frame_system::Pallet::::set_block_number(One::one()); }: _(SystemOrigin::Signed(target.clone()), Box::new(mint_data), signature.into(), caller) verify { let metadata: BoundedVec<_, _> = metadata.try_into().unwrap(); @@ -865,7 +865,7 @@ benchmarks_instance_pallet! 
{ let message = Encode::encode(&pre_signed_data); let signature = T::Helper::sign(&signer_public, &message); - T::BlockNumberProvider::set_block_number(One::one()); + frame_system::Pallet::::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) verify { assert_last_event::( diff --git a/substrate/frame/nfts/src/features/approvals.rs b/substrate/frame/nfts/src/features/approvals.rs index 4738f69f83c4..053fa67163b9 100644 --- a/substrate/frame/nfts/src/features/approvals.rs +++ b/substrate/frame/nfts/src/features/approvals.rs @@ -46,7 +46,7 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, delegate: T::AccountId, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Approvals), @@ -65,7 +65,7 @@ impl, I: 'static> Pallet { ensure!(check_origin == details.owner, Error::::NoPermission); } - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); let deadline = maybe_deadline.map(|d| d.saturating_add(now)); details @@ -111,7 +111,7 @@ impl, I: 'static> Pallet { let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; let is_past_deadline = if let Some(deadline) = maybe_deadline { - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); now > *deadline } else { false diff --git a/substrate/frame/nfts/src/features/atomic_swap.rs b/substrate/frame/nfts/src/features/atomic_swap.rs index 03ebd35b81b2..830283b73c2a 100644 --- a/substrate/frame/nfts/src/features/atomic_swap.rs +++ b/substrate/frame/nfts/src/features/atomic_swap.rs @@ -53,7 +53,7 @@ impl, I: 'static> Pallet { desired_collection_id: T::CollectionId, maybe_desired_item_id: Option, maybe_price: Option>>, - duration: BlockNumberFor, + duration: frame_system::pallet_prelude::BlockNumberFor, ) -> 
DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Swaps), @@ -76,7 +76,7 @@ impl, I: 'static> Pallet { ), }; - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); let deadline = duration.saturating_add(now); PendingSwapOf::::insert( @@ -119,7 +119,7 @@ impl, I: 'static> Pallet { let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownSwap)?; - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); if swap.deadline > now { let item = Item::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownItem)?; @@ -187,7 +187,7 @@ impl, I: 'static> Pallet { ensure!(desired_item == send_item_id, Error::::UnknownSwap); } - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); ensure!(now <= swap.deadline, Error::::DeadlineExpired); if let Some(ref price) = swap.price { diff --git a/substrate/frame/nfts/src/features/attributes.rs b/substrate/frame/nfts/src/features/attributes.rs index 2cd09f7d2193..28f7bd2c58ce 100644 --- a/substrate/frame/nfts/src/features/attributes.rs +++ b/substrate/frame/nfts/src/features/attributes.rs @@ -225,7 +225,7 @@ impl, I: 'static> Pallet { Error::::MaxAttributesLimitReached ); - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); let item_details = diff --git a/substrate/frame/nfts/src/features/create_delete_item.rs b/substrate/frame/nfts/src/features/create_delete_item.rs index 57366127f142..37f64ae1b1b9 100644 --- a/substrate/frame/nfts/src/features/create_delete_item.rs +++ b/substrate/frame/nfts/src/features/create_delete_item.rs @@ -145,7 +145,7 @@ impl, I: 'static> Pallet { ensure!(account == mint_to, Error::::WrongOrigin); } - let now = T::BlockNumberProvider::current_block_number(); + let 
now = frame_system::Pallet::::block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); ensure!( diff --git a/substrate/frame/nfts/src/features/settings.rs b/substrate/frame/nfts/src/features/settings.rs index 48719ae2c20e..d4f7533ffa4e 100644 --- a/substrate/frame/nfts/src/features/settings.rs +++ b/substrate/frame/nfts/src/features/settings.rs @@ -96,7 +96,11 @@ impl, I: 'static> Pallet { pub(crate) fn do_update_mint_settings( maybe_check_origin: Option, collection: T::CollectionId, - mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, + mint_settings: MintSettings< + BalanceOf, + frame_system::pallet_prelude::BlockNumberFor, + T::CollectionId, + >, ) -> DispatchResult { if let Some(check_origin) = &maybe_check_origin { ensure!( diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 346ad162c503..4e5493a3c755 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::traits::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{BlockNumberProvider, IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, + traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; @@ -76,7 +76,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; - use frame_system::{ensure_signed, pallet_prelude::OriginFor}; + use frame_system::pallet_prelude::*; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -210,7 +210,7 @@ pub mod pallet { /// The max duration in blocks for deadlines. #[pallet::constant] - type MaxDeadlineDuration: Get>; + type MaxDeadlineDuration: Get>; /// The max number of attributes a user could set per call. #[pallet::constant] @@ -242,9 +242,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; - - /// Provider for the block number. Normally this is the `frame_system` pallet. - type BlockNumberProvider: BlockNumberProvider; } /// Details of a collection. @@ -391,7 +388,7 @@ pub mod pallet { T::CollectionId, T::ItemId, PriceWithDirection>, - BlockNumberFor, + BlockNumberFor, >, OptionQuery, >; @@ -462,7 +459,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, - deadline: Option>, + deadline: Option>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -557,7 +554,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap was cancelled. SwapCancelled { @@ -566,7 +563,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap has been claimed. SwapClaimed { @@ -577,7 +574,7 @@ pub mod pallet { received_item: T::ItemId, received_item_owner: T::AccountId, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// New attributes have been set for an `item` of the `collection`. 
PreSignedAttributesSet { @@ -860,7 +857,7 @@ pub mod pallet { item_config, |collection_details, collection_config| { let mint_settings = collection_config.mint_settings; - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); if let Some(start_block) = mint_settings.start_block { ensure!(start_block <= now, Error::::MintNotStarted); @@ -1032,7 +1029,7 @@ pub mod pallet { let deadline = details.approvals.get(&origin).ok_or(Error::::NoPermission)?; if let Some(d) = deadline { - let block_number = T::BlockNumberProvider::current_block_number(); + let block_number = frame_system::Pallet::::block_number(); ensure!(block_number <= *d, Error::::ApprovalExpired); } } @@ -1293,7 +1290,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1716,7 +1713,7 @@ pub mod pallet { pub fn update_mint_settings( origin: OriginFor, collection: T::CollectionId, - mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1812,7 +1809,7 @@ pub mod pallet { desired_collection: T::CollectionId, maybe_desired_item: Option, maybe_price: Option>>, - duration: BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::do_create_swap( diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 291c3c081334..5b589f591ca3 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -92,7 +92,6 @@ impl Config for Test { type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); - type BlockNumberProvider = frame_system::Pallet; } pub(crate) fn new_test_ext() -> 
sp_io::TestExternalities { diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index 3ab85993473a..d67fb404ea79 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -27,11 +27,9 @@ use frame_support::{ traits::Get, BoundedBTreeMap, BoundedBTreeSet, }; +use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; -pub type BlockNumberFor = - <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; - /// A type alias for handling balance deposits. pub type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; @@ -41,7 +39,7 @@ pub type CollectionDetailsFor = /// A type alias for keeping track of approvals used by a single item. pub type ApprovalsOf = BoundedBTreeMap< ::AccountId, - Option>, + Option>, >::ApprovalsLimit, >; /// A type alias for keeping track of approvals for an item's attributes. @@ -72,13 +70,13 @@ pub type ItemTipOf = ItemTip< >; /// A type alias for the settings configuration of a collection. pub type CollectionConfigFor = - CollectionConfig, BlockNumberFor, >::CollectionId>; + CollectionConfig, BlockNumberFor, >::CollectionId>; /// A type alias for the pre-signed minting configuration for a specified collection. pub type PreSignedMintOf = PreSignedMint< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, BalanceOf, >; /// A type alias for the pre-signed minting configuration on the attribute level of an item. @@ -86,7 +84,7 @@ pub type PreSignedAttributesOf = PreSignedAttributes< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, >; /// Information about a collection. diff --git a/substrate/frame/nfts/src/weights.rs b/substrate/frame/nfts/src/weights.rs index 1182518e89f8..c5fb60a2206f 100644 --- a/substrate/frame/nfts/src/weights.rs +++ b/substrate/frame/nfts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nfts` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -109,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 39_795_000 picoseconds. - Weight::from_parts(40_954_000, 3549) + // Minimum execution time: 34_863_000 picoseconds. + Weight::from_parts(36_679_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -128,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 19_590_000 picoseconds. - Weight::from_parts(20_452_000, 3549) + // Minimum execution time: 19_631_000 picoseconds. + Weight::from_parts(20_384_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -152,16 +152,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { + fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32204 + a * (366 ±0)` // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_283_452_000 picoseconds. 
- Weight::from_parts(1_066_445_083, 2523990) - // Standard Error: 9_120 - .saturating_add(Weight::from_parts(195_960, 0).saturating_mul(m.into())) - // Standard Error: 9_120 - .saturating_add(Weight::from_parts(7_706_045, 0).saturating_mul(a.into())) + // Minimum execution time: 1_282_083_000 picoseconds. + Weight::from_parts(1_249_191_963, 2523990) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(6_470_227, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) @@ -184,8 +182,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 55_122_000 picoseconds. - Weight::from_parts(56_437_000, 4326) + // Minimum execution time: 49_055_000 picoseconds. + Weight::from_parts(50_592_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -205,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 53_137_000 picoseconds. - Weight::from_parts(54_307_000, 4326) + // Minimum execution time: 47_102_000 picoseconds. + Weight::from_parts(48_772_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -232,8 +230,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 59_107_000 picoseconds. - Weight::from_parts(60_638_000, 4326) + // Minimum execution time: 52_968_000 picoseconds. 
+ Weight::from_parts(55_136_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -257,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 47_355_000 picoseconds. - Weight::from_parts(48_729_000, 4326) + // Minimum execution time: 41_140_000 picoseconds. + Weight::from_parts(43_288_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -273,10 +271,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 19_597_000 picoseconds. - Weight::from_parts(19_920_000, 3549) - // Standard Error: 25_051 - .saturating_add(Weight::from_parts(18_457_577, 0).saturating_mul(i.into())) + // Minimum execution time: 14_433_000 picoseconds. + Weight::from_parts(14_664_000, 3549) + // Standard Error: 23_078 + .saturating_add(Weight::from_parts(15_911_377, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -290,8 +288,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 23_838_000 picoseconds. - Weight::from_parts(24_765_000, 3534) + // Minimum execution time: 18_307_000 picoseconds. + Weight::from_parts(18_966_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -303,8 +301,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 24_030_000 picoseconds. 
- Weight::from_parts(24_589_000, 3534) + // Minimum execution time: 18_078_000 picoseconds. + Weight::from_parts(18_593_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -316,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 20_505_000 picoseconds. - Weight::from_parts(20_809_000, 3549) + // Minimum execution time: 15_175_000 picoseconds. + Weight::from_parts(15_762_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -333,8 +331,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `562` // Estimated: `3593` - // Minimum execution time: 32_314_000 picoseconds. - Weight::from_parts(33_213_000, 3593) + // Minimum execution time: 26_164_000 picoseconds. + Weight::from_parts(27_117_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -346,8 +344,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 44_563_000 picoseconds. - Weight::from_parts(45_899_000, 6078) + // Minimum execution time: 38_523_000 picoseconds. + Weight::from_parts(39_486_000, 6078) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -359,8 +357,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 20_515_000 picoseconds. - Weight::from_parts(21_125_000, 3549) + // Minimum execution time: 15_733_000 picoseconds. 
+ Weight::from_parts(16_227_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -372,8 +370,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 16_933_000 picoseconds. - Weight::from_parts(17_552_000, 3549) + // Minimum execution time: 12_042_000 picoseconds. + Weight::from_parts(12_690_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -385,8 +383,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 22_652_000 picoseconds. - Weight::from_parts(23_655_000, 3534) + // Minimum execution time: 17_165_000 picoseconds. + Weight::from_parts(17_769_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3944` - // Minimum execution time: 56_832_000 picoseconds. - Weight::from_parts(58_480_000, 3944) + // Minimum execution time: 48_862_000 picoseconds. + Weight::from_parts(50_584_000, 3944) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3944` - // Minimum execution time: 30_136_000 picoseconds. - Weight::from_parts(30_919_000, 3944) + // Minimum execution time: 24_665_000 picoseconds. + Weight::from_parts(25_465_000, 3944) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -434,8 +432,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `983` // Estimated: `3944` - // Minimum execution time: 52_264_000 picoseconds. 
- Weight::from_parts(53_806_000, 3944) + // Minimum execution time: 44_617_000 picoseconds. + Weight::from_parts(46_458_000, 3944) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -447,8 +445,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 20_476_000 picoseconds. - Weight::from_parts(21_213_000, 4326) + // Minimum execution time: 15_710_000 picoseconds. + Weight::from_parts(16_191_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -465,10 +463,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `831 + n * (398 ±0)` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 30_667_000 picoseconds. - Weight::from_parts(31_079_000, 4326) - // Standard Error: 5_236 - .saturating_add(Weight::from_parts(7_517_246, 0).saturating_mul(n.into())) + // Minimum execution time: 24_447_000 picoseconds. + Weight::from_parts(25_144_000, 4326) + // Standard Error: 4_872 + .saturating_add(Weight::from_parts(6_523_101, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -489,8 +487,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3812` - // Minimum execution time: 46_520_000 picoseconds. - Weight::from_parts(47_471_000, 3812) + // Minimum execution time: 39_990_000 picoseconds. + Weight::from_parts(41_098_000, 3812) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -506,8 +504,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `849` // Estimated: `3812` - // Minimum execution time: 44_199_000 picoseconds. 
- Weight::from_parts(45_621_000, 3812) + // Minimum execution time: 38_030_000 picoseconds. + Weight::from_parts(39_842_000, 3812) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -523,8 +521,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3759` - // Minimum execution time: 41_260_000 picoseconds. - Weight::from_parts(42_420_000, 3759) + // Minimum execution time: 36_778_000 picoseconds. + Weight::from_parts(38_088_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -540,8 +538,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `716` // Estimated: `3759` - // Minimum execution time: 40_975_000 picoseconds. - Weight::from_parts(42_367_000, 3759) + // Minimum execution time: 36_887_000 picoseconds. + Weight::from_parts(38_406_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -553,8 +551,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 23_150_000 picoseconds. - Weight::from_parts(24_089_000, 4326) + // Minimum execution time: 18_734_000 picoseconds. + Weight::from_parts(19_267_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -564,8 +562,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 20_362_000 picoseconds. - Weight::from_parts(21_102_000, 4326) + // Minimum execution time: 16_080_000 picoseconds. 
+ Weight::from_parts(16_603_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -575,8 +573,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 19_564_000 picoseconds. - Weight::from_parts(20_094_000, 4326) + // Minimum execution time: 15_013_000 picoseconds. + Weight::from_parts(15_607_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -586,8 +584,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 13_360_000 picoseconds. - Weight::from_parts(13_943_000, 3517) + // Minimum execution time: 13_077_000 picoseconds. + Weight::from_parts(13_635_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -599,8 +597,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 21_304_000 picoseconds. - Weight::from_parts(22_021_000, 3549) + // Minimum execution time: 17_146_000 picoseconds. + Weight::from_parts(17_453_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -612,8 +610,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 20_888_000 picoseconds. - Weight::from_parts(21_600_000, 3538) + // Minimum execution time: 16_102_000 picoseconds. + Weight::from_parts(16_629_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -629,8 +627,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 27_414_000 picoseconds. 
- Weight::from_parts(28_382_000, 4326) + // Minimum execution time: 22_118_000 picoseconds. + Weight::from_parts(22_849_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -654,8 +652,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 55_660_000 picoseconds. - Weight::from_parts(57_720_000, 4326) + // Minimum execution time: 50_369_000 picoseconds. + Weight::from_parts(51_816_000, 4326) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -664,10 +662,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_064_000 picoseconds. - Weight::from_parts(3_432_697, 0) - // Standard Error: 6_920 - .saturating_add(Weight::from_parts(1_771_459, 0).saturating_mul(n.into())) + // Minimum execution time: 2_203_000 picoseconds. + Weight::from_parts(3_710_869, 0) + // Standard Error: 8_094 + .saturating_add(Weight::from_parts(2_201_869, 0).saturating_mul(n.into())) } /// Storage: `Nfts::Item` (r:2 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) @@ -677,8 +675,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 24_590_000 picoseconds. - Weight::from_parts(25_395_000, 7662) + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(19_506_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -690,8 +688,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 22_121_000 picoseconds. - Weight::from_parts(23_196_000, 4326) + // Minimum execution time: 19_086_000 picoseconds. 
+ Weight::from_parts(19_609_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -715,8 +713,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 85_761_000 picoseconds. - Weight::from_parts(88_382_000, 7662) + // Minimum execution time: 84_103_000 picoseconds. + Weight::from_parts(85_325_000, 7662) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -743,10 +741,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `629` // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 136_928_000 picoseconds. - Weight::from_parts(143_507_020, 6078) - // Standard Error: 45_424 - .saturating_add(Weight::from_parts(32_942_641, 0).saturating_mul(n.into())) + // Minimum execution time: 128_363_000 picoseconds. + Weight::from_parts(139_474_918, 6078) + // Standard Error: 79_252 + .saturating_add(Weight::from_parts(31_384_027, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -770,10 +768,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `659` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 72_412_000 picoseconds. - Weight::from_parts(84_724_399, 4326) - // Standard Error: 68_965 - .saturating_add(Weight::from_parts(31_711_702, 0).saturating_mul(n.into())) + // Minimum execution time: 66_688_000 picoseconds. 
+ Weight::from_parts(79_208_379, 4326) + // Standard Error: 74_020 + .saturating_add(Weight::from_parts(31_028_221, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -798,8 +796,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 39_795_000 picoseconds. - Weight::from_parts(40_954_000, 3549) + // Minimum execution time: 34_863_000 picoseconds. + Weight::from_parts(36_679_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -817,8 +815,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 19_590_000 picoseconds. - Weight::from_parts(20_452_000, 3549) + // Minimum execution time: 19_631_000 picoseconds. + Weight::from_parts(20_384_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -841,16 +839,14 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { + fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32204 + a * (366 ±0)` // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_283_452_000 picoseconds. - Weight::from_parts(1_066_445_083, 2523990) - // Standard Error: 9_120 - .saturating_add(Weight::from_parts(195_960, 0).saturating_mul(m.into())) - // Standard Error: 9_120 - .saturating_add(Weight::from_parts(7_706_045, 0).saturating_mul(a.into())) + // Minimum execution time: 1_282_083_000 picoseconds. 
+ Weight::from_parts(1_249_191_963, 2523990) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(6_470_227, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) @@ -873,8 +869,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 55_122_000 picoseconds. - Weight::from_parts(56_437_000, 4326) + // Minimum execution time: 49_055_000 picoseconds. + Weight::from_parts(50_592_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -894,8 +890,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 53_137_000 picoseconds. - Weight::from_parts(54_307_000, 4326) + // Minimum execution time: 47_102_000 picoseconds. + Weight::from_parts(48_772_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -921,8 +917,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 59_107_000 picoseconds. - Weight::from_parts(60_638_000, 4326) + // Minimum execution time: 52_968_000 picoseconds. + Weight::from_parts(55_136_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -946,8 +942,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 47_355_000 picoseconds. - Weight::from_parts(48_729_000, 4326) + // Minimum execution time: 41_140_000 picoseconds. 
+ Weight::from_parts(43_288_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -962,10 +958,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 19_597_000 picoseconds. - Weight::from_parts(19_920_000, 3549) - // Standard Error: 25_051 - .saturating_add(Weight::from_parts(18_457_577, 0).saturating_mul(i.into())) + // Minimum execution time: 14_433_000 picoseconds. + Weight::from_parts(14_664_000, 3549) + // Standard Error: 23_078 + .saturating_add(Weight::from_parts(15_911_377, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -979,8 +975,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 23_838_000 picoseconds. - Weight::from_parts(24_765_000, 3534) + // Minimum execution time: 18_307_000 picoseconds. + Weight::from_parts(18_966_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -992,8 +988,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 24_030_000 picoseconds. - Weight::from_parts(24_589_000, 3534) + // Minimum execution time: 18_078_000 picoseconds. + Weight::from_parts(18_593_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1005,8 +1001,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 20_505_000 picoseconds. - Weight::from_parts(20_809_000, 3549) + // Minimum execution time: 15_175_000 picoseconds. 
+ Weight::from_parts(15_762_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1022,8 +1018,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `562` // Estimated: `3593` - // Minimum execution time: 32_314_000 picoseconds. - Weight::from_parts(33_213_000, 3593) + // Minimum execution time: 26_164_000 picoseconds. + Weight::from_parts(27_117_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1035,8 +1031,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 44_563_000 picoseconds. - Weight::from_parts(45_899_000, 6078) + // Minimum execution time: 38_523_000 picoseconds. + Weight::from_parts(39_486_000, 6078) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1048,8 +1044,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 20_515_000 picoseconds. - Weight::from_parts(21_125_000, 3549) + // Minimum execution time: 15_733_000 picoseconds. + Weight::from_parts(16_227_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1061,8 +1057,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 16_933_000 picoseconds. - Weight::from_parts(17_552_000, 3549) + // Minimum execution time: 12_042_000 picoseconds. + Weight::from_parts(12_690_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1074,8 +1070,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 22_652_000 picoseconds. 
- Weight::from_parts(23_655_000, 3534) + // Minimum execution time: 17_165_000 picoseconds. + Weight::from_parts(17_769_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1093,8 +1089,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3944` - // Minimum execution time: 56_832_000 picoseconds. - Weight::from_parts(58_480_000, 3944) + // Minimum execution time: 48_862_000 picoseconds. + Weight::from_parts(50_584_000, 3944) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1106,8 +1102,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3944` - // Minimum execution time: 30_136_000 picoseconds. - Weight::from_parts(30_919_000, 3944) + // Minimum execution time: 24_665_000 picoseconds. + Weight::from_parts(25_465_000, 3944) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1123,8 +1119,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `983` // Estimated: `3944` - // Minimum execution time: 52_264_000 picoseconds. - Weight::from_parts(53_806_000, 3944) + // Minimum execution time: 44_617_000 picoseconds. + Weight::from_parts(46_458_000, 3944) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1136,8 +1132,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 20_476_000 picoseconds. - Weight::from_parts(21_213_000, 4326) + // Minimum execution time: 15_710_000 picoseconds. 
+ Weight::from_parts(16_191_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1154,10 +1150,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `831 + n * (398 ±0)` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 30_667_000 picoseconds. - Weight::from_parts(31_079_000, 4326) - // Standard Error: 5_236 - .saturating_add(Weight::from_parts(7_517_246, 0).saturating_mul(n.into())) + // Minimum execution time: 24_447_000 picoseconds. + Weight::from_parts(25_144_000, 4326) + // Standard Error: 4_872 + .saturating_add(Weight::from_parts(6_523_101, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1178,8 +1174,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3812` - // Minimum execution time: 46_520_000 picoseconds. - Weight::from_parts(47_471_000, 3812) + // Minimum execution time: 39_990_000 picoseconds. + Weight::from_parts(41_098_000, 3812) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1195,8 +1191,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `849` // Estimated: `3812` - // Minimum execution time: 44_199_000 picoseconds. - Weight::from_parts(45_621_000, 3812) + // Minimum execution time: 38_030_000 picoseconds. + Weight::from_parts(39_842_000, 3812) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1212,8 +1208,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3759` - // Minimum execution time: 41_260_000 picoseconds. - Weight::from_parts(42_420_000, 3759) + // Minimum execution time: 36_778_000 picoseconds. 
+ Weight::from_parts(38_088_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1229,8 +1225,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `716` // Estimated: `3759` - // Minimum execution time: 40_975_000 picoseconds. - Weight::from_parts(42_367_000, 3759) + // Minimum execution time: 36_887_000 picoseconds. + Weight::from_parts(38_406_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1242,8 +1238,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 23_150_000 picoseconds. - Weight::from_parts(24_089_000, 4326) + // Minimum execution time: 18_734_000 picoseconds. + Weight::from_parts(19_267_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1253,8 +1249,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 20_362_000 picoseconds. - Weight::from_parts(21_102_000, 4326) + // Minimum execution time: 16_080_000 picoseconds. + Weight::from_parts(16_603_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1264,8 +1260,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 19_564_000 picoseconds. - Weight::from_parts(20_094_000, 4326) + // Minimum execution time: 15_013_000 picoseconds. + Weight::from_parts(15_607_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1275,8 +1271,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 13_360_000 picoseconds. 
- Weight::from_parts(13_943_000, 3517) + // Minimum execution time: 13_077_000 picoseconds. + Weight::from_parts(13_635_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1288,8 +1284,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 21_304_000 picoseconds. - Weight::from_parts(22_021_000, 3549) + // Minimum execution time: 17_146_000 picoseconds. + Weight::from_parts(17_453_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1301,8 +1297,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 20_888_000 picoseconds. - Weight::from_parts(21_600_000, 3538) + // Minimum execution time: 16_102_000 picoseconds. + Weight::from_parts(16_629_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1318,8 +1314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 27_414_000 picoseconds. - Weight::from_parts(28_382_000, 4326) + // Minimum execution time: 22_118_000 picoseconds. + Weight::from_parts(22_849_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1343,8 +1339,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 55_660_000 picoseconds. - Weight::from_parts(57_720_000, 4326) + // Minimum execution time: 50_369_000 picoseconds. 
+ Weight::from_parts(51_816_000, 4326) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1353,10 +1349,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_064_000 picoseconds. - Weight::from_parts(3_432_697, 0) - // Standard Error: 6_920 - .saturating_add(Weight::from_parts(1_771_459, 0).saturating_mul(n.into())) + // Minimum execution time: 2_203_000 picoseconds. + Weight::from_parts(3_710_869, 0) + // Standard Error: 8_094 + .saturating_add(Weight::from_parts(2_201_869, 0).saturating_mul(n.into())) } /// Storage: `Nfts::Item` (r:2 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) @@ -1366,8 +1362,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 24_590_000 picoseconds. - Weight::from_parts(25_395_000, 7662) + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(19_506_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1379,8 +1375,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 22_121_000 picoseconds. - Weight::from_parts(23_196_000, 4326) + // Minimum execution time: 19_086_000 picoseconds. + Weight::from_parts(19_609_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1404,8 +1400,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 85_761_000 picoseconds. - Weight::from_parts(88_382_000, 7662) + // Minimum execution time: 84_103_000 picoseconds. 
+ Weight::from_parts(85_325_000, 7662) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -1432,10 +1428,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `629` // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 136_928_000 picoseconds. - Weight::from_parts(143_507_020, 6078) - // Standard Error: 45_424 - .saturating_add(Weight::from_parts(32_942_641, 0).saturating_mul(n.into())) + // Minimum execution time: 128_363_000 picoseconds. + Weight::from_parts(139_474_918, 6078) + // Standard Error: 79_252 + .saturating_add(Weight::from_parts(31_384_027, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1459,10 +1455,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `659` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 72_412_000 picoseconds. - Weight::from_parts(84_724_399, 4326) - // Standard Error: 68_965 - .saturating_add(Weight::from_parts(31_711_702, 0).saturating_mul(n.into())) + // Minimum execution time: 66_688_000 picoseconds. 
+ Weight::from_parts(79_208_379, 4326) + // Standard Error: 74_020 + .saturating_add(Weight::from_parts(31_028_221, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index ec1a5d93bcba..78e086d0ed12 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nis/src/weights.rs b/substrate/frame/nis/src/weights.rs index 4f476fd22c21..a2411c1e39a6 100644 --- a/substrate/frame/nis/src/weights.rs +++ b/substrate/frame/nis/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nis` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,7 +70,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. @@ -78,32 +78,32 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_511_000 picoseconds. - Weight::from_parts(49_908_184, 51487) - // Standard Error: 1_434 - .saturating_add(Weight::from_parts(104_320, 0).saturating_mul(l.into())) + // Minimum execution time: 47_065_000 picoseconds. 
+ Weight::from_parts(52_894_557, 51487) + // Standard Error: 275 + .saturating_add(Weight::from_parts(48_441, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54212` // Estimated: `51487` - // Minimum execution time: 163_636_000 picoseconds. - Weight::from_parts(172_874_000, 51487) + // Minimum execution time: 111_930_000 picoseconds. + Weight::from_parts(114_966_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. 
@@ -111,10 +111,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 52_140_000 picoseconds. - Weight::from_parts(46_062_457, 51487) - // Standard Error: 1_320 - .saturating_add(Weight::from_parts(91_098, 0).saturating_mul(l.into())) + // Minimum execution time: 47_726_000 picoseconds. + Weight::from_parts(48_162_043, 51487) + // Standard Error: 187 + .saturating_add(Weight::from_parts(38_372, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -126,15 +126,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 35_741_000 picoseconds. - Weight::from_parts(36_659_000, 3593) + // Minimum execution time: 31_194_000 picoseconds. + Weight::from_parts(32_922_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Nis::Receipts` (r:1 w:1) /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Nis::Summary` (r:1 w:1) @@ -146,9 +146,9 @@ impl WeightInfo for SubstrateWeight { fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `702` - // Estimated: `3820` - // Minimum execution time: 78_797_000 picoseconds. - Weight::from_parts(81_863_000, 3820) + // Estimated: `3675` + // Minimum execution time: 73_288_000 picoseconds. 
+ Weight::from_parts(76_192_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -163,13 +163,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `863` - // Estimated: `3820` - // Minimum execution time: 100_374_000 picoseconds. - Weight::from_parts(103_660_000, 3820) + // Estimated: `3675` + // Minimum execution time: 94_307_000 picoseconds. + Weight::from_parts(96_561_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -180,13 +180,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `388` - // Estimated: `3820` - // Minimum execution time: 58_624_000 picoseconds. - Weight::from_parts(60_177_000, 3820) + // Estimated: `3658` + // Minimum execution time: 49_873_000 picoseconds. 
+ Weight::from_parts(51_361_000, 3658) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -204,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `807` // Estimated: `3675` - // Minimum execution time: 98_193_000 picoseconds. - Weight::from_parts(101_255_000, 3675) + // Minimum execution time: 96_884_000 picoseconds. + Weight::from_parts(98_867_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 29_640_000 picoseconds. - Weight::from_parts(31_768_000, 7487) + // Minimum execution time: 21_019_000 picoseconds. + Weight::from_parts(22_057_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -230,8 +230,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 5_273_000 picoseconds. - Weight::from_parts(5_461_000, 51487) + // Minimum execution time: 4_746_000 picoseconds. + Weight::from_parts(4_953_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,8 +241,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_553_000 picoseconds. - Weight::from_parts(4_726_000, 0) + // Minimum execution time: 4_836_000 picoseconds. 
+ Weight::from_parts(5_093_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -252,7 +252,7 @@ impl WeightInfo for () { /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. @@ -260,32 +260,32 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_511_000 picoseconds. - Weight::from_parts(49_908_184, 51487) - // Standard Error: 1_434 - .saturating_add(Weight::from_parts(104_320, 0).saturating_mul(l.into())) + // Minimum execution time: 47_065_000 picoseconds. 
+ Weight::from_parts(52_894_557, 51487) + // Standard Error: 275 + .saturating_add(Weight::from_parts(48_441, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54212` // Estimated: `51487` - // Minimum execution time: 163_636_000 picoseconds. - Weight::from_parts(172_874_000, 51487) + // Minimum execution time: 111_930_000 picoseconds. + Weight::from_parts(114_966_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. 
@@ -293,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 52_140_000 picoseconds. - Weight::from_parts(46_062_457, 51487) - // Standard Error: 1_320 - .saturating_add(Weight::from_parts(91_098, 0).saturating_mul(l.into())) + // Minimum execution time: 47_726_000 picoseconds. + Weight::from_parts(48_162_043, 51487) + // Standard Error: 187 + .saturating_add(Weight::from_parts(38_372, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -308,15 +308,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 35_741_000 picoseconds. - Weight::from_parts(36_659_000, 3593) + // Minimum execution time: 31_194_000 picoseconds. + Weight::from_parts(32_922_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Nis::Receipts` (r:1 w:1) /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Nis::Summary` (r:1 w:1) @@ -328,9 +328,9 @@ impl WeightInfo for () { fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `702` - // Estimated: `3820` - // Minimum execution time: 78_797_000 picoseconds. - Weight::from_parts(81_863_000, 3820) + // Estimated: `3675` + // Minimum execution time: 73_288_000 picoseconds. 
+ Weight::from_parts(76_192_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -345,13 +345,13 @@ impl WeightInfo for () { /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `863` - // Estimated: `3820` - // Minimum execution time: 100_374_000 picoseconds. - Weight::from_parts(103_660_000, 3820) + // Estimated: `3675` + // Minimum execution time: 94_307_000 picoseconds. + Weight::from_parts(96_561_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -362,13 +362,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `388` - // Estimated: `3820` - // Minimum execution time: 58_624_000 picoseconds. - Weight::from_parts(60_177_000, 3820) + // Estimated: `3658` + // Minimum execution time: 49_873_000 picoseconds. 
+ Weight::from_parts(51_361_000, 3658) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -386,8 +386,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `807` // Estimated: `3675` - // Minimum execution time: 98_193_000 picoseconds. - Weight::from_parts(101_255_000, 3675) + // Minimum execution time: 96_884_000 picoseconds. + Weight::from_parts(98_867_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -401,8 +401,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 29_640_000 picoseconds. - Weight::from_parts(31_768_000, 7487) + // Minimum execution time: 21_019_000 picoseconds. + Weight::from_parts(22_057_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -412,8 +412,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 5_273_000 picoseconds. - Weight::from_parts(5_461_000, 51487) + // Minimum execution time: 4_746_000 picoseconds. + Weight::from_parts(4_953_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -423,8 +423,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_553_000 picoseconds. - Weight::from_parts(4_726_000, 0) + // Minimum execution time: 4_836_000 picoseconds. 
+ Weight::from_parts(5_093_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 174736493934..82aecc21d0b5 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index a5e8da17eb23..aa90e4d81339 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -26,11 +26,11 @@ scale-info = { features = [ # FRAME frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +log = { workspace = true } # Optional: use for testing and/or fuzzing pallet-balances = { optional = true, workspace = true } diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 0b3ac228e86f..7dd826a91224 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -26,9 +26,9 @@ frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-bags-list = { workspace = true } 
+pallet-staking = { workspace = true } pallet-delegated-staking = { workspace = true } pallet-nomination-pools = { workspace = true } -pallet-staking = { workspace = true } # Substrate Primitives sp-runtime = { workspace = true } @@ -37,8 +37,8 @@ sp-staking = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index 7ddb78cca3f9..b0c8f3655a50 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -18,7 +18,7 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. use alloc::{vec, vec::Vec}; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{account, whitelist_account}; use frame_election_provider_support::SortedListProvider; use frame_support::{ assert_ok, ensure, @@ -270,21 +270,19 @@ impl ListScenario { } } -#[benchmarks( - where - T: pallet_staking::Config, - pallet_staking::BalanceOf: From, - BalanceOf: Into, -)] -mod benchmarks { - use super::*; - - #[benchmark] - fn join() { +frame_benchmarking::benchmarks! { + where_clause { + where + T: pallet_staking::Config, + pallet_staking::BalanceOf: From, + BalanceOf: Into, + } + + join { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); // setup the worst case list scenario. 
- let scenario = ListScenario::::new(origin_weight, true).unwrap(); + let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1.clone())), origin_weight @@ -293,13 +291,12 @@ mod benchmarks { let max_additional = scenario.dest_weight - origin_weight; let joiner_free = CurrencyOf::::minimum_balance() + max_additional; - let joiner: T::AccountId = create_funded_user_with_balance::("joiner", 0, joiner_free); + let joiner: T::AccountId + = create_funded_user_with_balance::("joiner", 0, joiner_free); whitelist_account!(joiner); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1); - + }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) + verify { assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); assert_eq!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1)), @@ -307,64 +304,51 @@ mod benchmarks { ); } - #[benchmark] - fn bond_extra_transfer() { + bond_extra_transfer { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true).unwrap(); + let scenario = ListScenario::::new(origin_weight, true)?; let extra = scenario.dest_weight - origin_weight; // creator of the src pool will bond-extra, bumping itself to dest bag. 
- #[extrinsic_call] - bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)); - + }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) + verify { assert!( - T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= + scenario.dest_weight ); } - #[benchmark] - fn bond_extra_other() { + bond_extra_other { let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true).unwrap(); + let scenario = ListScenario::::new(origin_weight, true)?; let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); - // set claim preferences to `PermissionlessAll` to any account to bond extra on member's - // behalf. - let _ = Pools::::set_claim_permission( - RuntimeOrigin::Signed(scenario.creator1.clone()).into(), - ClaimPermission::PermissionlessAll, - ); + // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); // transfer exactly `extra` to the depositor of the src pool (1), let reward_account1 = Pools::::generate_reward_account(1); assert!(extra >= CurrencyOf::::minimum_balance()); let _ = CurrencyOf::::mint_into(&reward_account1, extra); - #[extrinsic_call] - _( - RuntimeOrigin::Signed(claimer), - T::Lookup::unlookup(scenario.creator1.clone()), - BondExtra::Rewards, - ); - - // commission of 50% deducted here. + }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) + verify { + // commission of 50% deducted here. 
assert!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= - scenario.dest_weight / 2u32.into() + scenario.dest_weight / 2u32.into() ); } - #[benchmark] - fn claim_payout() { + claim_payout { let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); let commission = Perbill::from_percent(50); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); - let (depositor, _pool_account) = - create_pool_account::(0, origin_weight, Some(commission)); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); let reward_account = Pools::::generate_reward_account(1); // Send funds to the reward account of the pool @@ -372,32 +356,33 @@ mod benchmarks { // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's // behalf. - let _ = Pools::::set_claim_permission( - RuntimeOrigin::Signed(depositor.clone()).into(), - ClaimPermission::PermissionlessAll, - ); + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); // Sanity check - assert_eq!(CurrencyOf::::balance(&depositor), origin_weight); + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + ); whitelist_account!(depositor); - - #[extrinsic_call] - claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()); - + }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) + verify { assert_eq!( CurrencyOf::::balance(&depositor), origin_weight + commission * origin_weight ); - assert_eq!(CurrencyOf::::balance(&reward_account), ed + commission * origin_weight); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); } - #[benchmark] - fn unbond() { + + unbond { // The weight the nominator will start at. The value used here is expected to be // significantly higher than the first position in a list (e.g. the first bag threshold). 
let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); - let scenario = ListScenario::::new(origin_weight, false).unwrap(); + let scenario = ListScenario::::new(origin_weight, false)?; let amount = origin_weight - scenario.dest_weight; let scenario = scenario.add_joiner(amount); @@ -405,30 +390,36 @@ mod benchmarks { let member_id_lookup = T::Lookup::unlookup(member_id.clone()); let all_points = PoolMembers::::get(&member_id).unwrap().points; whitelist_account!(member_id); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points); - + }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) + verify { let bonded_after = T::StakeAdapter::active_stake(Pool::from(scenario.origin1)); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); - let member = PoolMembers::::get(&member_id).unwrap(); + let member = PoolMembers::::get( + &member_id + ) + .unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), vec![0 + T::StakeAdapter::bonding_duration()] ); - assert_eq!(member.unbonding_eras.values().cloned().collect::>(), vec![all_points]); + assert_eq!( + member.unbonding_eras.values().cloned().collect::>(), + vec![all_points] + ); } - #[benchmark] - fn pool_withdraw_unbonded(s: Linear<0, MAX_SPANS>) { + pool_withdraw_unbonded { + let s in 0 .. 
MAX_SPANS; + let min_create_bond = Pools::::depositor_min_bond(); - let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); // Sanity check join worked assert_eq!( @@ -438,8 +429,7 @@ mod benchmarks { assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); // Unbond the new member - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()) - .unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( @@ -453,26 +443,26 @@ mod benchmarks { // Add `s` count of slashing spans to storage. pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(pool_account); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(pool_account.clone()), 1, s); - + }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) + verify { // The joiners funds didn't change assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); // The unlocking chunk was removed assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); } - #[benchmark] - fn withdraw_unbonded_update(s: Linear<0, MAX_SPANS>) { + withdraw_unbonded_update { + let s in 0 .. 
MAX_SPANS; + let min_create_bond = Pools::::depositor_min_bond(); - let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); // Sanity check join worked assert_eq!( @@ -483,8 +473,7 @@ mod benchmarks { // Unbond the new member pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()) - .unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( @@ -498,17 +487,18 @@ mod benchmarks { pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(joiner); - - #[extrinsic_call] - withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s); - - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into()); + }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) + verify { + assert_eq!( + CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() + ); // The unlocking chunk was removed assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); } - #[benchmark] - fn withdraw_unbonded_kill(s: Linear<0, MAX_SPANS>) { + withdraw_unbonded_kill { + let s in 0 .. 
MAX_SPANS; + let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); @@ -529,14 +519,13 @@ mod benchmarks { // up when unbonding. let reward_account = Pools::::generate_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond( - RuntimeOrigin::Signed(depositor.clone()).into(), - depositor.clone(), - ) - .unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); // Sanity check that unbond worked - assert_eq!(T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), Zero::zero()); + assert_eq!( + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), + Zero::zero() + ); assert_eq!( T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), Some(min_create_bond) @@ -555,10 +544,8 @@ mod benchmarks { assert!(frame_system::Account::::contains_key(&reward_account)); whitelist_account!(depositor); - - #[extrinsic_call] - withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s); - + }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) + verify { // Pool removal worked assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); assert!(!BondedPools::::contains_key(&1)); @@ -576,34 +563,27 @@ mod benchmarks { ); } - #[benchmark] - fn create() { + create { let min_create_bond = Pools::::depositor_min_bond(); let depositor: T::AccountId = account("depositor", USER_SEED, 0); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond - // it needs to transfer min balance to reward account as well so give additional min - // balance. 
- CurrencyOf::::set_balance( - &depositor, - min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into(), - ); + // it needs to transfer min balance to reward account as well so give additional min balance. + CurrencyOf::::set_balance(&depositor, min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into()); // Make sure no Pools exist at a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); whitelist_account!(depositor); - - #[extrinsic_call] - _( + }: _( RuntimeOrigin::Signed(depositor.clone()), min_create_bond, depositor_lookup.clone(), depositor_lookup.clone(), - depositor_lookup, - ); - + depositor_lookup + ) + verify { assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); let (_, new_pool) = BondedPools::::iter().next().unwrap(); @@ -628,21 +608,22 @@ mod benchmarks { ); } - #[benchmark] - fn nominate(n: Linear<1, { MaxNominationsOf::::get() }>) { + nominate { + let n in 1 .. MaxNominationsOf::::get(); + // Create a pool let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, _pool_account) = create_pool_account::(0, min_create_bond, None); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Create some accounts to nominate. 
For the sake of benchmarking they don't need to be // actual validators - let validators: Vec<_> = (0..n).map(|i| account("stash", USER_SEED, i)).collect(); + let validators: Vec<_> = (0..n) + .map(|i| account("stash", USER_SEED, i)) + .collect(); whitelist_account!(depositor); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor.clone()), 1, validators); - + }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) + verify { assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); let (_, new_pool) = BondedPools::::iter().next().unwrap(); @@ -667,12 +648,10 @@ mod benchmarks { ); } - #[benchmark] - fn set_state() { + set_state { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); - // Don't need the accounts, but the pool. - let _ = create_pool_account::(0, min_create_bond, None); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); BondedPools::::mutate(&1, |maybe_pool| { // Force the pool into an invalid state maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); @@ -680,44 +659,36 @@ mod benchmarks { let caller = account("caller", 0, USER_SEED); whitelist_account!(caller); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying); - + }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) + verify { assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); } - #[benchmark] - fn set_metadata( - n: Linear<1, { ::MaxMetadataLen::get() }>, - ) { + set_metadata { + let n in 1 .. 
::MaxMetadataLen::get(); + // Create a pool - let (depositor, _pool_account) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Create metadata of the max possible size let metadata: Vec = (0..n).map(|_| 42).collect(); whitelist_account!(depositor); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor), 1, metadata.clone()); + }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) + verify { assert_eq!(Metadata::::get(&1), metadata); } - #[benchmark] - fn set_configs() { - #[extrinsic_call] - _( - RuntimeOrigin::Root, - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(Perbill::max_value()), - ); - + set_configs { + }:_( + RuntimeOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Perbill::max_value()) + ) verify { assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); assert_eq!(MaxPools::::get(), Some(u32::MAX)); @@ -726,22 +697,17 @@ mod benchmarks { assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); } - #[benchmark] - fn update_roles() { + update_roles { let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - let random: T::AccountId = - account("but is anything really random in computers..?", 0, USER_SEED); - - #[extrinsic_call] - _( - RuntimeOrigin::Signed(root.clone()), - first_id, - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()), - ); + let (root, _) = create_pool_account::(0, 
Pools::::depositor_min_bond() * 2u32.into(), None); + let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); + }:_( + RuntimeOrigin::Signed(root.clone()), + first_id, + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()) + ) verify { assert_eq!( pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, pallet_nomination_pools::PoolRoles { @@ -753,14 +719,12 @@ mod benchmarks { ) } - #[benchmark] - fn chill() { + chill { // Create a pool - let (depositor, pool_account) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Nominate with the pool. - let validators: Vec<_> = (0..MaxNominationsOf::::get()) + let validators: Vec<_> = (0..MaxNominationsOf::::get()) .map(|i| account("stash", USER_SEED, i)) .collect(); @@ -768,176 +732,121 @@ mod benchmarks { assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_some()); whitelist_account!(depositor); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor.clone()), 1); - + }:_(RuntimeOrigin::Signed(depositor.clone()), 1) + verify { assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_none()); } - #[benchmark] - fn set_commission() { + set_commission { // Create a pool - do not set a commission yet. 
- let (depositor, _pool_account) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // set a max commission - Pools::::set_commission_max( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - Perbill::from_percent(50), - ) - .unwrap(); + Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); // set a change rate - Pools::::set_commission_change_rate( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into(), - }, - ) - .unwrap(); + Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into(), + }).unwrap(); // set a claim permission to an account. 
Pools::::set_commission_claim_permission( RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), - Some(CommissionClaimPermission::Account(depositor.clone())), - ) - .unwrap(); - - #[extrinsic_call] - _( - RuntimeOrigin::Signed(depositor.clone()), - 1u32.into(), - Some((Perbill::from_percent(20), depositor.clone())), - ); - - assert_eq!( - BondedPools::::get(1).unwrap().commission, - Commission { - current: Some((Perbill::from_percent(20), depositor.clone())), - max: Some(Perbill::from_percent(50)), - change_rate: Some(CommissionChangeRate { + Some(CommissionClaimPermission::Account(depositor.clone())) + ).unwrap(); + + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) + verify { + assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(20), depositor.clone())), + max: Some(Perbill::from_percent(50)), + change_rate: Some(CommissionChangeRate { max_increase: Perbill::from_percent(20), min_delay: 0u32.into() - }), - throttle_from: Some(1u32.into()), - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - } - ); + }), + throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); } - #[benchmark] - fn set_commission_max() { + set_commission_max { // Create a pool, setting a commission that will update when max commission is set. 
- let (depositor, _pool_account) = create_pool_account::( - 0, - Pools::::depositor_min_bond() * 2u32.into(), - Some(Perbill::from_percent(50)), - ); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)); - + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) + verify { assert_eq!( - BondedPools::::get(1).unwrap().commission, - Commission { - current: Some((Perbill::from_percent(50), depositor)), - max: Some(Perbill::from_percent(50)), - change_rate: None, - throttle_from: Some(0u32.into()), - claim_permission: None, - } - ); + BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(50), depositor)), + max: Some(Perbill::from_percent(50)), + change_rate: None, + throttle_from: Some(0u32.into()), + claim_permission: None, + }); } - #[benchmark] - fn set_commission_change_rate() { + set_commission_change_rate { // Create a pool - let (depositor, _pool_account) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - #[extrinsic_call] - _( - RuntimeOrigin::Signed(depositor.clone()), - 1u32.into(), - CommissionChangeRate { + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: Some(CommissionChangeRate { max_increase: Perbill::from_percent(50), min_delay: 1000u32.into(), - }, - ); - - assert_eq!( - BondedPools::::get(1).unwrap().commission, - Commission { - current: None, - max: None, - change_rate: Some(CommissionChangeRate { - max_increase: 
Perbill::from_percent(50), - min_delay: 1000u32.into(), - }), - throttle_from: Some(1_u32.into()), - claim_permission: None, - } - ); - } + }), + throttle_from: Some(1_u32.into()), + claim_permission: None, + }); + } - #[benchmark] - fn set_commission_claim_permission() { + set_commission_claim_permission { // Create a pool. - let (depositor, _pool_account) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - #[extrinsic_call] - _( - RuntimeOrigin::Signed(depositor.clone()), - 1u32.into(), - Some(CommissionClaimPermission::Account(depositor.clone())), - ); - + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) + verify { assert_eq!( - BondedPools::::get(1).unwrap().commission, - Commission { - current: None, - max: None, - change_rate: None, - throttle_from: None, - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - } - ); + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); } - #[benchmark] - fn set_claim_permission() { + set_claim_permission { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); - let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Join pool let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) 
+ .unwrap(); // Sanity check join worked assert_eq!( T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned); - + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) + verify { assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); } - #[benchmark] - fn claim_commission() { + claim_commission { let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); let commission = Perbill::from_percent(50); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); - let (depositor, _pool_account) = - create_pool_account::(0, origin_weight, Some(commission)); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); let reward_account = Pools::::generate_reward_account(1); CurrencyOf::::set_balance(&reward_account, ed + origin_weight); @@ -947,60 +856,52 @@ mod benchmarks { let _ = Pools::::set_commission_claim_permission( RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), - Some(CommissionClaimPermission::Account(claimer)), + Some(CommissionClaimPermission::Account(claimer)) ); whitelist_account!(depositor); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()); - + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) + verify { assert_eq!( CurrencyOf::::balance(&depositor), origin_weight + commission * origin_weight ); - assert_eq!(CurrencyOf::::balance(&reward_account), ed + commission * origin_weight); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); } - #[benchmark] - fn adjust_pool_deposit() { + adjust_pool_deposit { // Create a pool - let (depositor, _) = - create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, _) = create_pool_account::(0, 
Pools::::depositor_min_bond() * 2u32.into(), None); // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. let _ = Pools::::unfreeze_pool_deposit(&Pools::::generate_reward_account(1)); assert!(&Pools::::check_ed_imbalance().is_err()); whitelist_account!(depositor); - - #[extrinsic_call] - _(RuntimeOrigin::Signed(depositor), 1); - + }:_(RuntimeOrigin::Signed(depositor), 1) + verify { assert!(&Pools::::check_ed_imbalance().is_ok()); } - #[benchmark] - fn apply_slash() { + apply_slash { // Note: With older `TransferStake` strategy, slashing is greedy and apply_slash should // always fail. // We want to fill member's unbonding pools. So let's bond with big enough amount. - let deposit_amount = - Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into(); + let deposit_amount = Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into(); let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // verify user balance in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); // verify delegated balance. - assert_if_delegate::( - T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == - Some(deposit_amount), - ); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); // ugly type conversion between balances of pallet staking and pools (which really are same // type). Maybe there is a better way? - let slash_amount: u128 = deposit_amount.into() / 2; + let slash_amount: u128 = deposit_amount.into()/2; // slash pool by half pallet_staking::slashing::do_slash::( @@ -1008,75 +909,49 @@ mod benchmarks { slash_amount.into(), &mut pallet_staking::BalanceOf::::zero(), &mut pallet_staking::NegativeImbalanceOf::::zero(), - EraIndex::zero(), + EraIndex::zero() ); // verify user balance is slashed in the pool. 
- assert_eq!( - PoolMembers::::get(&depositor).unwrap().total_balance(), - deposit_amount / 2u32.into() - ); + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); // verify delegated balance are not yet slashed. - assert_if_delegate::( - T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == - Some(deposit_amount), - ); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); // Fill member's sub pools for the worst case. for i in 1..(T::MaxUnbonding::get() + 1) { pallet_staking::CurrentEra::::put(i); - assert!(Pools::::unbond( - RuntimeOrigin::Signed(depositor.clone()).into(), - depositor_lookup.clone(), - Pools::::depositor_min_bond() - ) - .is_ok()); + assert!(Pools::::unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone(), Pools::::depositor_min_bond()).is_ok()); } pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 2); - let slash_reporter = - create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance()); + let slash_reporter = create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance()); whitelist_account!(depositor); - - #[block] - { - assert_if_delegate::( - Pools::::apply_slash( - RuntimeOrigin::Signed(slash_reporter.clone()).into(), - depositor_lookup.clone(), - ) - .is_ok(), - ); - } - + }: + { + assert_if_delegate::(Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()).is_ok()); + } + verify { // verify balances are correct and slash applied. 
- assert_eq!( - PoolMembers::::get(&depositor).unwrap().total_balance(), - deposit_amount / 2u32.into() - ); - assert_if_delegate::( - T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == - Some(deposit_amount / 2u32.into()), - ); + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount/2u32.into())); } - #[benchmark] - fn apply_slash_fail() { + apply_slash_fail { // Bench the scenario where pool has some unapplied slash but the member does not have any // slash to be applied. let deposit_amount = Pools::::depositor_min_bond() * 10u32.into(); // Create pool. - let (_depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); // slash pool by half - let slash_amount: u128 = deposit_amount.into() / 2; + let slash_amount: u128 = deposit_amount.into()/2; pallet_staking::slashing::do_slash::( &pool_account, slash_amount.into(), &mut pallet_staking::BalanceOf::::zero(), &mut pallet_staking::NegativeImbalanceOf::::zero(), - EraIndex::zero(), + EraIndex::zero() ); pallet_staking::CurrentEra::::put(1); @@ -1086,106 +961,68 @@ mod benchmarks { let join_amount = min_join_bond * T::MaxUnbonding::get().into() * 2u32.into(); let joiner = create_funded_user_with_balance::("joiner", 0, join_amount * 2u32.into()); let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - assert!( - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok() - ); + assert!(Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok()); // Fill member's sub pools for the worst case. for i in 0..T::MaxUnbonding::get() { pallet_staking::CurrentEra::::put(i + 2); // +2 because we already set the current era to 1. 
- assert!(Pools::::unbond( - RuntimeOrigin::Signed(joiner.clone()).into(), - joiner_lookup.clone(), - min_join_bond - ) - .is_ok()); + assert!(Pools::::unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone(), min_join_bond).is_ok()); } pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 3); whitelist_account!(joiner); - // Since the StakeAdapter can be different based on the runtime config, the errors could be - // different as well. - #[block] - { - assert!(Pools::::apply_slash( - RuntimeOrigin::Signed(joiner.clone()).into(), - joiner_lookup.clone() - ) - .is_err()); - } + }: { + // Since the StakeAdapter can be different based on the runtime config, the errors could be different as well. + assert!(Pools::::apply_slash(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone()).is_err()); } - #[benchmark] - fn pool_migrate() { + + pool_migrate { // create a pool. let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); // migrate pool to transfer stake. let _ = migrate_to_transfer_stake::(1); - #[block] - { - assert_if_delegate::( - Pools::::migrate_pool_to_delegate_stake( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - ) - .is_ok(), - ); - } + }: { + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); + } + verify { // this queries agent balance if `DelegateStake` strategy. - assert_eq!( - T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), - Some(deposit_amount) - ); + assert!(T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) == Some(deposit_amount)); } - #[benchmark] - fn migrate_delegation() { + migrate_delegation { // create a pool. 
let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, _pool_account) = create_pool_account::(0, deposit_amount, None); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // migrate pool to transfer stake. let _ = migrate_to_transfer_stake::(1); // Now migrate pool to delegate stake keeping delegators unmigrated. - assert_if_delegate::( - Pools::::migrate_pool_to_delegate_stake( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - ) - .is_ok(), - ); + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); // delegation does not exist. - assert!( - T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none() - ); + assert!(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none()); // contribution exists in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); whitelist_account!(depositor); - - #[block] - { - assert_if_delegate::( - Pools::::migrate_delegation( - RuntimeOrigin::Signed(depositor.clone()).into(), - depositor_lookup.clone(), - ) - .is_ok(), - ); - } + }: { + assert_if_delegate::(Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone()).is_ok()); + } + verify { // verify balances once more. 
- assert_if_delegate::( - T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == - Some(deposit_amount), - ); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Runtime + ); } diff --git a/substrate/frame/nomination-pools/fuzzer/Cargo.toml b/substrate/frame/nomination-pools/fuzzer/Cargo.toml index 2f84004ece94..e1518ed099ae 100644 --- a/substrate/frame/nomination-pools/fuzzer/Cargo.toml +++ b/substrate/frame/nomination-pools/fuzzer/Cargo.toml @@ -21,15 +21,15 @@ honggfuzz = { workspace = true } pallet-nomination-pools = { features = ["fuzzing"], workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } rand = { features = ["small_rng"], workspace = true, default-features = true } +log = { workspace = true, default-features = true } [[bin]] name = "call" diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index 337cc31c7cbb..6de9fc8c8844 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } 
-pallet-nomination-pools = { workspace = true } sp-api = { workspace = true } +pallet-nomination-pools = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 644ee07fd634..4138dd22d898 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -43,9 +43,6 @@ sp_api::decl_runtime_apis! { fn pool_pending_slash(pool_id: PoolId) -> Balance; /// Returns the pending slash for a given pool member. - /// - /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on - /// chain. fn member_pending_slash(member: AccountId) -> Balance; /// Returns true if the pool with `pool_id` needs migration. diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index dc82bf3a37c6..201b0af1d608 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1944,8 +1944,6 @@ pub mod pallet { NothingToAdjust, /// No slash pending that can be applied to the member. NothingToSlash, - /// The slash amount is too low to be applied. - SlashTooLow, /// The pool or member delegation has already migrated to delegate stake. AlreadyMigrated, /// The pool or member delegation has not migrated yet to delegate stake. @@ -2302,7 +2300,7 @@ pub mod pallet { let slash_weight = // apply slash if any before withdraw. - match Self::do_apply_slash(&member_account, None, false) { + match Self::do_apply_slash(&member_account, None) { Ok(_) => T::WeightInfo::apply_slash(), Err(e) => { let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into()); @@ -2976,10 +2974,8 @@ pub mod pallet { /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: /// [`adapter::StakeStrategyType::Delegate`]. 
/// - /// The pending slash amount of the member must be equal or more than `ExistentialDeposit`. - /// This call can be dispatched permissionlessly (i.e. by any account). If the execution - /// is successful, fee is refunded and caller may be rewarded with a part of the slash - /// based on the [`crate::pallet::Config::StakeAdapter`] configuration. + /// This call can be dispatched permissionlessly (i.e. by any account). If the member has + /// slash to be applied, caller may be rewarded with the part of the slash. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::apply_slash())] pub fn apply_slash( @@ -2993,7 +2989,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; - Self::do_apply_slash(&member_account, Some(who), true)?; + Self::do_apply_slash(&member_account, Some(who))?; // If successful, refund the fees. Ok(Pays::No.into()) @@ -3578,21 +3574,15 @@ impl Pallet { fn do_apply_slash( member_account: &T::AccountId, reporter: Option, - enforce_min_slash: bool, ) -> DispatchResult { let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; let pending_slash = Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; - // ensure there is something to slash. + // if nothing to slash, return error. ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); - if enforce_min_slash { - // ensure slashed amount is at least the minimum balance. - ensure!(pending_slash >= T::Currency::minimum_balance(), Error::::SlashTooLow); - } - T::StakeAdapter::member_slash( Member::from(member_account.clone()), Pool::from(Pallet::::generate_bonded_account(member.pool_id)), @@ -3956,9 +3946,6 @@ impl Pallet { /// Returns the unapplied slash of a member. /// /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. - /// - /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on - /// chain via [`Call::apply_slash`]. 
pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { PoolMembers::::get(who.clone()) .map(|pool_member| { diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 086def4759a8..21711a499b62 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -1382,4 +1382,4 @@ impl WeightInfo for () { Weight::from_parts(37_038_000, 27847) .saturating_add(RocksDbWeight::get().reads(6_u64)) } -} \ No newline at end of file +} diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index fe3743d7e5da..7940caaff775 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -19,23 +19,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-staking = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -frame-support = { features = ["experimental"], workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } 
+pallet-timestamp = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-delegated-staking = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } +pallet-delegated-staking = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index cc6335959ab7..7fee2a0bdb23 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -20,7 +20,7 @@ mod mock; use frame_support::{ - assert_noop, assert_ok, hypothetically, + assert_noop, assert_ok, traits::{fungible::InspectHold, Currency}, }; use mock::*; @@ -41,7 +41,7 @@ use sp_staking::Agent; fn pool_lifecycle_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -204,7 +204,7 @@ fn pool_lifecycle_e2e() { fn pool_chill_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -330,7 +330,7 @@ fn pool_slash_e2e() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -537,9 +537,9 @@ fn pool_slash_proportional() { // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that // happened in era 100 should only affect the latter two. new_test_ext().execute_with(|| { - ExistentialDeposit::set(2); + ExistentialDeposit::set(1); BondingDuration::set(28); - assert_eq!(Balances::minimum_balance(), 2); + assert_eq!(Balances::minimum_balance(), 1); assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. @@ -670,34 +670,6 @@ fn pool_slash_proportional() { // no pending slash yet. assert_eq!(Pools::api_pool_pending_slash(1), 0); - // and therefore applying slash fails - assert_noop!( - Pools::apply_slash(RuntimeOrigin::signed(10), 21), - PoolsError::::NothingToSlash - ); - - hypothetically!({ - // a very small amount is slashed - pallet_staking::slashing::do_slash::( - &POOL1_BONDED, - 3, - &mut Default::default(), - &mut Default::default(), - 100, - ); - - // ensure correct amount is pending to be slashed - assert_eq!(Pools::api_pool_pending_slash(1), 3); - - // 21 has pending slash lower than ED (2) - assert_eq!(Pools::api_member_pending_slash(21), 1); - - // slash fails as minimum pending slash amount not met. 
- assert_noop!( - Pools::apply_slash(RuntimeOrigin::signed(10), 21), - PoolsError::::SlashTooLow - ); - }); pallet_staking::slashing::do_slash::( &POOL1_BONDED, @@ -786,7 +758,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -865,7 +837,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -937,13 +909,12 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ); }); } - #[test] fn pool_migration_e2e() { new_test_ext().execute_with(|| { LegacyAdapter::set(true); assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool with TransferStake strategy. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -1221,7 +1192,7 @@ fn disable_pool_operations_on_non_migrated() { new_test_ext().execute_with(|| { LegacyAdapter::set(true); assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool with TransferStake strategy. 
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -1398,7 +1369,7 @@ fn pool_no_dangling_delegation() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // pool creator let alice = 10; let bob = 20; diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml index 2cdc4c41a083..7398404c2351 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml @@ -19,22 +19,22 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-staking = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } pallet-staking = { 
workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs index cc39cfee91c8..28e978bba0e5 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs @@ -34,7 +34,7 @@ use sp_runtime::{bounded_btree_map, traits::Zero}; fn pool_lifecycle_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -286,7 +286,7 @@ fn destroy_pool_with_erroneous_consumer() { fn pool_chill_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -412,7 +412,7 @@ fn pool_slash_e2e() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -622,7 +622,7 @@ fn pool_slash_proportional() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -759,7 +759,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -838,7 +838,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index 4dd9d7f10c9f..98c320e1f808 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } log = { workspace = true } -pallet-balances = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 76b167ebdb33..28c7895180c4 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-babe = { workspace = true } pallet-balances = { workspace = true } pallet-grandpa = { workspace = true } @@ -29,9 +29,9 @@ pallet-im-online = { workspace = true } pallet-offences = { workspace = true } pallet-session = { workspace = true } pallet-staking = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } +log = { workspace = true } [dev-dependencies] pallet-staking-reward-curve = { workspace = true, 
default-features = true } diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 75f3e9931e34..573114de0742 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -19,7 +19,7 @@ use alloc::{vec, vec::Vec}; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{account, benchmarks}; use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -144,7 +144,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' fn make_offenders( num_offenders: u32, num_nominators: u32, -) -> Result>, &'static str> { +) -> Result<(Vec>, Vec>), &'static str> { Staking::::new_session(0); let mut offenders = vec![]; @@ -167,50 +167,21 @@ fn make_offenders( .expect("failed to convert validator id to full identification") }) .collect::>>(); - Ok(id_tuples) + Ok((id_tuples, offenders)) } -#[cfg(test)] -fn assert_all_slashes_applied(offender_count: usize) -where - T: Config, - ::RuntimeEvent: TryInto>, - ::RuntimeEvent: TryInto>, - ::RuntimeEvent: TryInto, - ::RuntimeEvent: TryInto>, -{ - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + - // reporter account endowed + some funds rescinded from issuance. - assert_eq!( - System::::read_events_for_pallet::>().len(), - 2 * (offender_count + 1) + 3 - ); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!( - System::::read_events_for_pallet::>().len(), - 1 * (offender_count + 1) + 1 - ); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); -} - -#[benchmarks( - where +benchmarks! 
{ + where_clause { + where ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto, ::RuntimeEvent: TryInto>, -)] -mod benchmarks { - use super::*; - - #[benchmark] - pub fn report_offence_grandpa( - n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, - ) -> Result<(), BenchmarkError> { + } + + report_offence_grandpa { + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + // for grandpa equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -218,7 +189,7 @@ mod benchmarks { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let mut offenders = make_offenders::(1, n)?; + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { @@ -228,24 +199,28 @@ mod benchmarks { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - - #[block] - { - let _ = Offences::::report_offence(reporters, offence); - } - + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { #[cfg(test)] { - assert_all_slashes_applied::(n as usize); + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter + // account endowed + some funds rescinded from issuance. 
+ assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); } - - Ok(()) } - #[benchmark] - fn report_offence_babe( - n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, - ) -> Result<(), BenchmarkError> { + report_offence_babe { + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + // for babe equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -253,7 +228,7 @@ mod benchmarks { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let mut offenders = make_offenders::(1, n)?; + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { @@ -263,17 +238,23 @@ mod benchmarks { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - - #[block] - { - let _ = Offences::::report_offence(reporters, offence); - } + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { #[cfg(test)] { - assert_all_slashes_applied::(n as usize); + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter + // account endowed + some funds rescinded from issuance. 
+ assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); } - - Ok(()) } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index c5c178aa4443..efaec49a65b3 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -29,7 +29,7 @@ use frame_system as system; use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, - BuildStorage, KeyTypeId, Perbill, + BuildStorage, Perbill, }; type AccountId = u64; @@ -66,8 +66,7 @@ sp_runtime::impl_opaque_keys! 
{ pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - // corresponds to the opaque key id above - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index da029bdd7423..a680139c5fdc 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -23,10 +23,10 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-runtime = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-metadata-ir = { optional = true, workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index 7e6162df09ba..d0108254ed2d 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -21,5 +21,5 @@ arbitrary = { workspace = true } honggfuzz = { workspace = true } frame-support = { features = ["std"], workspace = true } -pallet-paged-list = { features = ["std"], workspace = true } sp-io = { features = ["std"], workspace = true } +pallet-paged-list = { features = ["std"], workspace = true } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index dda218b618c4..a97ba1172a50 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -9,22 +9,22 @@ edition.workspace = true [dependencies] codec = { features = ["max-encoded-len"], workspace = true } -docify = { workspace = true } -paste = { workspace = true } scale-info = { features = ["derive"], workspace = true } +paste = { workspace = true } serde = { features = 
["derive"], optional = true, workspace = true, default-features = true } +docify = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { features = ["std"], workspace = true, default-features = true } -pallet-example-basic = { features = ["std"], workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-io = { features = ["std"], workspace = true, default-features = true } +pallet-example-basic = { features = ["std"], workspace = true, default-features = true } +pallet-balances = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/parameters/src/weights.rs b/substrate/frame/parameters/src/weights.rs index 5601247dad2b..6510db9ebce5 100644 --- a/substrate/frame/parameters/src/weights.rs +++ b/substrate/frame/parameters/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_parameters` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -63,8 +63,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3501` - // Minimum execution time: 8_202_000 picoseconds. 
- Weight::from_parts(8_485_000, 3501) + // Minimum execution time: 8_360_000 picoseconds. + Weight::from_parts(8_568_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -78,8 +78,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3501` - // Minimum execution time: 8_202_000 picoseconds. - Weight::from_parts(8_485_000, 3501) + // Minimum execution time: 8_360_000 picoseconds. + Weight::from_parts(8_568_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index fae6627b6315..1356ac403d38 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -13,14 +13,14 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { optional = true, workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +log = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index ea635bf3ef77..3d0c5b900579 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,13 +17,14 @@ //! Preimage pallet benchmarking. 
+use super::*; use alloc::vec; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::*; +use crate::Pallet as Preimage; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -42,225 +43,206 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash -} - -#[benchmarks] -mod benchmarks { - use super::*; - +benchmarks! { // Expensive note - will reserve. - #[benchmark] - fn note_preimage(s: Linear<0, MAX_SIZE>) { + note_preimage { + let s in 0 .. MAX_SIZE; let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), preimage); - - assert!(Pallet::::have_preimage(&hash)); + }: _(RawOrigin::Signed(caller), preimage) + verify { + assert!(Preimage::::have_preimage(&hash)); } - // Cheap note - will not reserve since it was requested. - #[benchmark] - fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { + note_requested_preimage { + let s in 0 .. 
MAX_SIZE; let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Pallet::::request_preimage( + assert_ok!(Preimage::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - - #[extrinsic_call] - note_preimage(RawOrigin::Signed(caller), preimage); - - assert!(Pallet::::have_preimage(&hash)); + }: note_preimage(RawOrigin::Signed(caller), preimage) + verify { + assert!(Preimage::::have_preimage(&hash)); } - // Cheap note - will not reserve since it's the manager. - #[benchmark] - fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + note_no_deposit_preimage { + let s in 0 .. MAX_SIZE; let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - - #[extrinsic_call] - note_preimage(o as T::RuntimeOrigin, preimage); - - assert!(Pallet::::have_preimage(&hash)); + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + }: note_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + preimage + ) verify { + assert!(Preimage::::have_preimage(&hash)); } // Expensive unnote - will unreserve. 
- #[benchmark] - fn unnote_preimage() { + unnote_preimage { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), hash); - - assert!(!Pallet::::have_preimage(&hash)); + assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + }: _(RawOrigin::Signed(caller), hash) + verify { + assert!(!Preimage::::have_preimage(&hash)); } - // Cheap unnote - will not unreserve since there's no deposit held. - #[benchmark] - fn unnote_no_deposit_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + unnote_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); - - #[extrinsic_call] - unnote_preimage(o as T::RuntimeOrigin, hash); - - assert!(!Pallet::::have_preimage(&hash)); + assert_ok!(Preimage::::note_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + preimage, + )); + }: unnote_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { + assert!(!Preimage::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. 
- #[benchmark] - fn request_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + request_preimage { let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - - #[extrinsic_call] - _(o as T::RuntimeOrigin, hash); - - let ticket = - TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { - maybe_ticket: Some((noter, ticket)), - count: 1, - maybe_len: Some(MAX_SIZE), - }; + assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + }: _( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { + let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - // Cheap request - would unreserve the deposit but none was held. 
- #[benchmark] - fn request_no_deposit_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + request_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); - - #[extrinsic_call] - request_preimage(o as T::RuntimeOrigin, hash); - - let s = - RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Preimage::::note_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + preimage, + )); + }: request_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - // Cheap request - the preimage is not yet noted, so deposit to unreserve. - #[benchmark] - fn request_unnoted_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + request_unnoted_preimage { let (_, hash) = preimage_and_hash::(); - - #[extrinsic_call] - request_preimage(o as T::RuntimeOrigin, hash); - + }: request_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - // Cheap request - the preimage is already requested, so just a counter bump. 
- #[benchmark] - fn request_requested_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + request_requested_preimage { let (_, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - - #[extrinsic_call] - request_preimage(o as T::RuntimeOrigin, hash); - + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + }: request_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. - #[benchmark] - fn unrequest_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + unrequest_preimage { let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); - - #[extrinsic_call] - _(o as T::RuntimeOrigin, hash); - + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + assert_ok!(Preimage::::note_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + preimage, + )); + }: _( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { assert_eq!(RequestStatusFor::::get(&hash), None); } - // Cheap unrequest - last reference, but it's not noted. 
- #[benchmark] - fn unrequest_unnoted_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + unrequest_unnoted_preimage { let (_, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - - #[extrinsic_call] - unrequest_preimage(o as T::RuntimeOrigin, hash); - + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + }: unrequest_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { assert_eq!(RequestStatusFor::::get(&hash), None); } - // Cheap unrequest - not the last reference. - #[benchmark] - fn unrequest_multi_referenced_preimage() { - let o = T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"); + unrequest_multi_referenced_preimage { let (_, hash) = preimage_and_hash::(); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); - - #[extrinsic_call] - unrequest_preimage(o as T::RuntimeOrigin, hash); - + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + assert_ok!(Preimage::::request_preimage( + T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"), + hash, + )); + }: unrequest_preimage( + T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, + hash + ) verify { let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - #[benchmark] - fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { + ensure_updated { + 
let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; + let caller = funded_account::(); let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), hashes); - + }: _(RawOrigin::Signed(caller), hashes) + verify { assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite! { - Pallet, - mock::new_test_ext(), - mock::Test - } + impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); +} + +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index a3aec7e7546e..4e389e3a7340 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_preimage -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/preimage/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -74,18 +76,18 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7` + // Measured: `112` // Estimated: `6012` - // Minimum execution time: 51_305_000 picoseconds. - Weight::from_parts(51_670_000, 6012) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) + // Minimum execution time: 52_531_000 picoseconds. 
+ Weight::from_parts(53_245_000, 6012) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_744, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -98,12 +100,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_204_000 picoseconds. - Weight::from_parts(16_613_000, 3556) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) + // Minimum execution time: 15_601_000 picoseconds. + Weight::from_parts(15_871_000, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_836, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -116,12 +118,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 15_118_000 picoseconds. - Weight::from_parts(15_412_000, 3556) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) + // Minimum execution time: 15_614_000 picoseconds. 
+ Weight::from_parts(15_934_000, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_832, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -130,15 +132,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `3820` - // Minimum execution time: 57_218_000 picoseconds. - Weight::from_parts(61_242_000, 3820) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 53_001_000 picoseconds. + Weight::from_parts(55_866_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -150,10 +152,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 25_140_000 picoseconds. - Weight::from_parts(27_682_000, 3556) + // Minimum execution time: 26_901_000 picoseconds. 
+ Weight::from_parts(28_079_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -163,10 +165,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `150` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 25_296_000 picoseconds. - Weight::from_parts(27_413_000, 3556) + // Minimum execution time: 21_716_000 picoseconds. + Weight::from_parts(25_318_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -176,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 15_011_000 picoseconds. - Weight::from_parts(16_524_000, 3556) + // Minimum execution time: 13_890_000 picoseconds. + Weight::from_parts(14_744_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -189,10 +191,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `4` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 14_649_000 picoseconds. - Weight::from_parts(15_439_000, 3556) + // Minimum execution time: 14_192_000 picoseconds. 
+ Weight::from_parts(15_113_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -202,10 +204,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_914_000 picoseconds. - Weight::from_parts(11_137_000, 3556) + // Minimum execution time: 9_909_000 picoseconds. + Weight::from_parts(10_134_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -217,10 +219,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 22_512_000 picoseconds. - Weight::from_parts(24_376_000, 3556) + // Minimum execution time: 21_725_000 picoseconds. + Weight::from_parts(24_058_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -230,10 +232,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_571_000 picoseconds. - Weight::from_parts(10_855_000, 3556) + // Minimum execution time: 9_854_000 picoseconds. 
+ Weight::from_parts(10_175_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -243,10 +245,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_312_000 picoseconds. - Weight::from_parts(10_653_000, 3556) + // Minimum execution time: 10_143_000 picoseconds. + Weight::from_parts(10_539_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -257,22 +259,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1023 w:1023) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` - // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 61_990_000 picoseconds. - Weight::from_parts(62_751_000, 6012) - // Standard Error: 44_079 - .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) + // Estimated: `6012 + n * (2668 ±0)` + // Minimum execution time: 59_384_000 picoseconds. 
+ Weight::from_parts(60_000_000, 6012) + // Standard Error: 39_890 + .saturating_add(Weight::from_parts(56_317_686, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2830).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } @@ -285,18 +287,18 @@ impl WeightInfo for () { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7` + // Measured: `112` // Estimated: `6012` - // Minimum execution time: 51_305_000 picoseconds. - Weight::from_parts(51_670_000, 6012) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) + // Minimum execution time: 52_531_000 picoseconds. + Weight::from_parts(53_245_000, 6012) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_744, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -309,12 +311,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 4194304]`. 
fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_204_000 picoseconds. - Weight::from_parts(16_613_000, 3556) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) + // Minimum execution time: 15_601_000 picoseconds. + Weight::from_parts(15_871_000, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_836, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -327,12 +329,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 15_118_000 picoseconds. - Weight::from_parts(15_412_000, 3556) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) + // Minimum execution time: 15_614_000 picoseconds. 
+ Weight::from_parts(15_934_000, 3556) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_832, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -341,15 +343,15 @@ impl WeightInfo for () { /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `3820` - // Minimum execution time: 57_218_000 picoseconds. - Weight::from_parts(61_242_000, 3820) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 53_001_000 picoseconds. + Weight::from_parts(55_866_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -361,10 +363,10 @@ impl WeightInfo for () { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 25_140_000 picoseconds. - Weight::from_parts(27_682_000, 3556) + // Minimum execution time: 26_901_000 picoseconds. 
+ Weight::from_parts(28_079_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -374,10 +376,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `150` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 25_296_000 picoseconds. - Weight::from_parts(27_413_000, 3556) + // Minimum execution time: 21_716_000 picoseconds. + Weight::from_parts(25_318_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -387,10 +389,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 15_011_000 picoseconds. - Weight::from_parts(16_524_000, 3556) + // Minimum execution time: 13_890_000 picoseconds. + Weight::from_parts(14_744_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -400,10 +402,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `4` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 14_649_000 picoseconds. - Weight::from_parts(15_439_000, 3556) + // Minimum execution time: 14_192_000 picoseconds. 
+ Weight::from_parts(15_113_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -413,10 +415,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_914_000 picoseconds. - Weight::from_parts(11_137_000, 3556) + // Minimum execution time: 9_909_000 picoseconds. + Weight::from_parts(10_134_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -428,10 +430,10 @@ impl WeightInfo for () { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 22_512_000 picoseconds. - Weight::from_parts(24_376_000, 3556) + // Minimum execution time: 21_725_000 picoseconds. + Weight::from_parts(24_058_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -441,10 +443,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_571_000 picoseconds. - Weight::from_parts(10_855_000, 3556) + // Minimum execution time: 9_854_000 picoseconds. 
+ Weight::from_parts(10_175_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -454,10 +456,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_312_000 picoseconds. - Weight::from_parts(10_653_000, 3556) + // Minimum execution time: 10_143_000 picoseconds. + Weight::from_parts(10_539_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -468,21 +470,21 @@ impl WeightInfo for () { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1023 w:1023) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` - // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 61_990_000 picoseconds. - Weight::from_parts(62_751_000, 6012) - // Standard Error: 44_079 - .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) + // Estimated: `6012 + n * (2668 ±0)` + // Minimum execution time: 59_384_000 picoseconds. 
+ Weight::from_parts(60_000_000, 6012) + // Standard Error: 39_890 + .saturating_add(Weight::from_parts(56_317_686, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2830).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index a36b2c1cb9c3..8897c66419c7 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["max-encoded-len"], workspace = true } -frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index b72f53af8e72..eebb506bf374 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -22,9 +22,7 @@ use super::*; use crate::Pallet as Proxy; use alloc::{boxed::Box, vec}; -use frame::benchmarking::prelude::{ - account, benchmarks, impl_test_function, whitelisted_caller, BenchmarkError, RawOrigin, -}; +use frame::benchmarking::prelude::*; const SEED: u32 = 0; @@ -319,7 +317,7 @@ mod benchmarks { BlockNumberFor::::zero(), 0, )?; - let height = T::BlockNumberProvider::current_block_number(); + let height = frame_system::Pallet::::block_number(); let ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); diff --git 
a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index cc21db7469b2..cc8aeedcc5f9 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -47,9 +47,6 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type BlockNumberFor = - <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; - type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The parameters under which a particular account has a proxy relationship with some other @@ -166,9 +163,6 @@ pub mod pallet { /// into a pre-existing storage value. #[pallet::constant] type AnnouncementDepositFactor: Get>; - - /// Provider for the block number. Normally this is the `frame_system` pallet. - type BlockNumberProvider: BlockNumberProvider; } #[pallet::call] @@ -385,7 +379,7 @@ pub mod pallet { let announcement = Announcement { real: real.clone(), call_hash, - height: T::BlockNumberProvider::current_block_number(), + height: frame_system::Pallet::::block_number(), }; Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { @@ -496,7 +490,7 @@ pub mod pallet { let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = T::BlockNumberProvider::current_block_number(); + let now = frame_system::Pallet::::block_number(); Self::edit_announcements(&delegate, |ann| { ann.real != real || ann.call_hash != call_hash || @@ -632,7 +626,7 @@ impl Pallet { ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( - T::BlockNumberProvider::current_block_number(), + frame_system::Pallet::::block_number(), frame_system::Pallet::::extrinsic_index().unwrap_or_default(), ) }); diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index afc668188e6c..5baf9bb9e838 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -119,7 +119,6 @@ impl 
Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; - type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs index 851c0ba98a82..eab2cb4b2683 100644 --- a/substrate/frame/proxy/src/weights.rs +++ b/substrate/frame/proxy/src/weights.rs @@ -411,4 +411,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file +} diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 78a02bec8e97..eca59cf7fc22 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +impl-trait-for-tuples = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/ranked-collective/src/benchmarking.rs b/substrate/frame/ranked-collective/src/benchmarking.rs index 978489fb8485..dc7f4aaca773 100644 --- a/substrate/frame/ranked-collective/src/benchmarking.rs +++ b/substrate/frame/ranked-collective/src/benchmarking.rs @@ -21,12 +21,11 @@ use super::*; #[allow(unused_imports)] use crate::Pallet as RankedCollective; use alloc::vec::Vec; -use 
frame_benchmarking::{ - v1::{account, BenchmarkError}, - v2::*, -}; -use frame_support::{assert_err, assert_ok, traits::NoOpPoll}; +use frame_benchmarking::v1::{ + account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, +}; +use frame_support::{assert_ok, traits::UnfilteredDispatchable}; use frame_system::RawOrigin as SystemOrigin; const SEED: u32 = 0; @@ -57,273 +56,131 @@ fn make_member, I: 'static>(rank: Rank) -> T::AccountId { who } -#[instance_benchmarks( -where <>::Polls as frame_support::traits::Polling>>>::Index: From -)] -mod benchmarks { - use super::*; - - #[benchmark] - fn add_member() -> Result<(), BenchmarkError> { - // Generate a test account for the new member. +benchmarks_instance_pallet! { + add_member { let who = account::("member", 0, SEED); let who_lookup = T::Lookup::unlookup(who.clone()); - - // Attempt to get the successful origin for adding a member. let origin = T::AddOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, who_lookup); - - // Ensure the member count has increased (or is 1 for rank 0). + let call = Call::::add_member { who: who_lookup }; + }: { call.dispatch_bypass_filter(origin)? } + verify { assert_eq!(MemberCount::::get(0), 1); - - // Check that the correct event was emitted. assert_last_event::(Event::MemberAdded { who }.into()); - - Ok(()) } - #[benchmark] - fn remove_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { - // Convert `r` to a rank and create members. + remove_member { + let r in 0 .. 10; let rank = r as u16; + let first = make_member::(rank); let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); - - // Collect the index of the `last` member for each rank. - let last_index: Vec<_> = - (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect(); - - // Fetch the remove origin. 
+ let last_index = (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect::>(); let origin = T::RemoveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, who_lookup, rank); - + let call = Call::::remove_member { who: who_lookup, min_rank: rank }; + }: { call.dispatch_bypass_filter(origin)? } + verify { for r in 0..=rank { - assert_eq!(MemberCount::::get(r), 1); + assert_eq!(MemberCount::::get(r), 2); assert_ne!(last_index[r as usize], IdToIndex::::get(r, &last).unwrap()); } - - // Ensure the correct event was emitted for the member removal. assert_last_event::(Event::MemberRemoved { who, rank }.into()); - - Ok(()) } - #[benchmark] - fn promote_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { - // Convert `r` to a rank and create the member. + promote_member { + let r in 0 .. 10; let rank = r as u16; let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); - - // Try to fetch the promotion origin. let origin = T::PromoteOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, who_lookup); - - // Ensure the member's rank has increased by 1. + let call = Call::::promote_member { who: who_lookup }; + }: { call.dispatch_bypass_filter(origin)? } + verify { assert_eq!(Members::::get(&who).unwrap().rank, rank + 1); - - // Ensure the correct event was emitted for the rank change. assert_last_event::(Event::RankChanged { who, rank: rank + 1 }.into()); - - Ok(()) } - #[benchmark] - fn demote_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { - // Convert `r` to a rank and create necessary members for the benchmark. + demote_member { + let r in 0 .. 10; let rank = r as u16; + let first = make_member::(rank); let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); - - // Get the last index for the member. 
let last_index = IdToIndex::::get(rank, &last).unwrap(); - - // Try to fetch the demotion origin. let origin = T::DemoteOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, who_lookup); - - // Ensure the member's rank has decreased by 1. + let call = Call::::demote_member { who: who_lookup }; + }: { call.dispatch_bypass_filter(origin)? } + verify { assert_eq!(Members::::get(&who).map(|x| x.rank), rank.checked_sub(1)); - - // Ensure the member count remains as expected. - assert_eq!(MemberCount::::get(rank), 1); - - // Ensure the index of the last member has changed. + assert_eq!(MemberCount::::get(rank), 2); assert_ne!(last_index, IdToIndex::::get(rank, &last).unwrap()); - - // Ensure the correct event was emitted depending on the member's rank. - assert_last_event::( - match rank { - 0 => Event::MemberRemoved { who, rank: 0 }, - r => Event::RankChanged { who, rank: r - 1 }, - } - .into(), - ); - - Ok(()) + assert_last_event::(match rank { + 0 => Event::MemberRemoved { who, rank: 0 }, + r => Event::RankChanged { who, rank: r - 1 }, + }.into()); } - #[benchmark] - fn vote() -> Result<(), BenchmarkError> { - // Get the first available class or set it to None if no class exists. - let class = T::Polls::classes().into_iter().next(); - - // Convert the class to a rank if it exists, otherwise use the default rank. - let rank = class.as_ref().map_or( - as frame_support::traits::RankedMembers>::Rank::default(), - |class| T::MinRankOfClass::convert(class.clone()), - ); + vote { + let class = T::Polls::classes().into_iter().next().unwrap(); + let rank = T::MinRankOfClass::convert(class.clone()); - // Create a caller based on the rank. let caller = make_member::(rank); + let caller_lookup = T::Lookup::unlookup(caller.clone()); - // Determine the poll to use: create an ongoing poll if class exists, or use an invalid - // poll. 
- let poll = if let Some(ref class) = class { - T::Polls::create_ongoing(class.clone()) - .expect("Poll creation should succeed for rank 0") - } else { - >::Index::MAX.into() - }; - - // Benchmark the vote logic for a positive vote (true). - #[block] - { - let vote_result = - Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, true); - - // If the class exists, expect success; otherwise expect a "NotPolling" error. - if class.is_some() { - assert_ok!(vote_result); - } else { - assert_err!(vote_result, crate::Error::::NotPolling); - }; - } - - // Vote logic for a negative vote (false). - let vote_result = - Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, false); - - // Check the result of the negative vote. - if class.is_some() { - assert_ok!(vote_result); - } else { - assert_err!(vote_result, crate::Error::::NotPolling); - }; - - // If the class exists, verify the vote event and tally. - if let Some(_) = class { - let tally = Tally::from_parts(0, 0, 1); - let vote_event = Event::Voted { who: caller, poll, vote: VoteRecord::Nay(1), tally }; - assert_last_event::(vote_event.into()); - } + let poll = T::Polls::create_ongoing(class).expect("Must always be able to create a poll for rank 0"); - Ok(()) + // Vote once. + assert_ok!(Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, true)); + }: _(SystemOrigin::Signed(caller.clone()), poll, false) + verify { + let tally = Tally::from_parts(0, 0, 1); + let ev = Event::Voted { who: caller, poll, vote: VoteRecord::Nay(1), tally }; + assert_last_event::(ev.into()); } - #[benchmark] - fn cleanup_poll(n: Linear<0, 100>) -> Result<(), BenchmarkError> { - let alice: T::AccountId = whitelisted_caller(); - let origin = SystemOrigin::Signed(alice.clone()); - - // Try to retrieve the first class if it exists. - let class = T::Polls::classes().into_iter().next(); - - // Convert the class to a rank, or use a default rank if no class exists. 
- let rank = class.as_ref().map_or( - as frame_support::traits::RankedMembers>::Rank::default(), - |class| T::MinRankOfClass::convert(class.clone()), - ); + cleanup_poll { + let n in 0 .. 100; - // Determine the poll to use: create an ongoing poll if class exists, or use an invalid - // poll. - let poll = if let Some(ref class) = class { - T::Polls::create_ongoing(class.clone()) - .expect("Poll creation should succeed for rank 0") - } else { - >::Index::MAX.into() - }; + // Create a poll + let class = T::Polls::classes().into_iter().next().unwrap(); + let rank = T::MinRankOfClass::convert(class.clone()); + let poll = T::Polls::create_ongoing(class).expect("Must always be able to create a poll"); - // Simulate voting by `n` members. - for _ in 0..n { - let voter = make_member::(rank); - let result = Pallet::::vote(SystemOrigin::Signed(voter).into(), poll, true); - - // Check voting results based on class existence. - if class.is_some() { - assert_ok!(result); - } else { - assert_err!(result, crate::Error::::NotPolling); - } - } - - // End the poll if the class exists. - if class.is_some() { - T::Polls::end_ongoing(poll, false) - .map_err(|_| BenchmarkError::Stop("Failed to end poll"))?; + // Vote in the poll by each of `n` members + for i in 0..n { + let who = make_member::(rank); + assert_ok!(Pallet::::vote(SystemOrigin::Signed(who).into(), poll, true)); } - // Verify the number of votes cast. - let expected_votes = if class.is_some() { n as usize } else { 0 }; - assert_eq!(Voting::::iter_prefix(poll).count(), expected_votes); + // End the poll. + T::Polls::end_ongoing(poll, false).expect("Must always be able to end a poll"); - // Benchmark the cleanup function. - #[extrinsic_call] - _(origin, poll, n); - - // Ensure all votes are cleaned up after the extrinsic call. 
+ assert_eq!(Voting::::iter_prefix(poll).count(), n as usize); + }: _(SystemOrigin::Signed(whitelisted_caller()), poll, n) + verify { assert_eq!(Voting::::iter().count(), 0); - - Ok(()) } - #[benchmark] - fn exchange_member() -> Result<(), BenchmarkError> { - // Create an existing member. + exchange_member { let who = make_member::(1); T::BenchmarkSetup::ensure_member(&who); let who_lookup = T::Lookup::unlookup(who.clone()); - - // Create a new account for the new member. let new_who = account::("new-member", 0, SEED); let new_who_lookup = T::Lookup::unlookup(new_who.clone()); - - // Attempt to get the successful origin for exchanging a member. let origin = T::ExchangeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(origin as T::RuntimeOrigin, who_lookup, new_who_lookup); - - // Check that the new member was successfully exchanged and holds the correct rank. + let call = Call::::exchange_member { who: who_lookup, new_who: new_who_lookup }; + }: { call.dispatch_bypass_filter(origin)? } + verify { assert_eq!(Members::::get(&new_who).unwrap().rank, 1); - - // Ensure the old member no longer exists. assert_eq!(Members::::get(&who), None); - - // Ensure the correct event was emitted. assert_has_event::(Event::MemberExchanged { who, new_who }.into()); - - Ok(()) } - impl_benchmark_test_suite!( - RankedCollective, - crate::tests::ExtBuilder::default().build(), - crate::tests::Test - ); + impl_benchmark_test_suite!(RankedCollective, crate::tests::ExtBuilder::default().build(), crate::tests::Test); } diff --git a/substrate/frame/ranked-collective/src/weights.rs b/substrate/frame/ranked-collective/src/weights.rs index 09215c1ec096..e728635f2e72 100644 --- a/substrate/frame/ranked-collective/src/weights.rs +++ b/substrate/frame/ranked-collective/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_ranked_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 16_363_000 picoseconds. - Weight::from_parts(16_792_000, 3507) + // Minimum execution time: 15_440_000 picoseconds. + Weight::from_parts(15_990_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -93,10 +93,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 37_472_000 picoseconds. - Weight::from_parts(38_888_667, 3519) - // Standard Error: 36_527 - .saturating_add(Weight::from_parts(18_271_687, 0).saturating_mul(r.into())) + // Minimum execution time: 30_171_000 picoseconds. + Weight::from_parts(33_395_037, 3519) + // Standard Error: 21_741 + .saturating_add(Weight::from_parts(16_589_950, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -116,10 +116,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_069_000 picoseconds. 
- Weight::from_parts(21_231_820, 3507) - // Standard Error: 5_686 - .saturating_add(Weight::from_parts(415_623, 0).saturating_mul(r.into())) + // Minimum execution time: 18_597_000 picoseconds. + Weight::from_parts(19_774_947, 3507) + // Standard Error: 5_735 + .saturating_add(Weight::from_parts(339_013, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -136,10 +136,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 37_085_000 picoseconds. - Weight::from_parts(40_627_931, 3519) - // Standard Error: 23_398 - .saturating_add(Weight::from_parts(847_496, 0).saturating_mul(r.into())) + // Minimum execution time: 29_670_000 picoseconds. + Weight::from_parts(33_022_564, 3519) + // Standard Error: 28_521 + .saturating_add(Weight::from_parts(817_563, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -157,8 +157,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 49_474_000 picoseconds. - Weight::from_parts(50_506_000, 219984) + // Minimum execution time: 42_072_000 picoseconds. + Weight::from_parts(43_360_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -173,10 +173,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(23_414_747, 3795) - // Standard Error: 2_751 - .saturating_add(Weight::from_parts(1_314_498, 0).saturating_mul(n.into())) + // Minimum execution time: 14_338_000 picoseconds. 
+ Weight::from_parts(18_144_424, 3795) + // Standard Error: 2_482 + .saturating_add(Weight::from_parts(1_200_576, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -200,8 +200,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `625` // Estimated: `19894` - // Minimum execution time: 79_257_000 picoseconds. - Weight::from_parts(81_293_000, 19894) + // Minimum execution time: 73_317_000 picoseconds. + Weight::from_parts(75_103_000, 19894) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -221,8 +221,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 16_363_000 picoseconds. - Weight::from_parts(16_792_000, 3507) + // Minimum execution time: 15_440_000 picoseconds. + Weight::from_parts(15_990_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -239,10 +239,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 37_472_000 picoseconds. - Weight::from_parts(38_888_667, 3519) - // Standard Error: 36_527 - .saturating_add(Weight::from_parts(18_271_687, 0).saturating_mul(r.into())) + // Minimum execution time: 30_171_000 picoseconds. 
+ Weight::from_parts(33_395_037, 3519) + // Standard Error: 21_741 + .saturating_add(Weight::from_parts(16_589_950, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -262,10 +262,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_069_000 picoseconds. - Weight::from_parts(21_231_820, 3507) - // Standard Error: 5_686 - .saturating_add(Weight::from_parts(415_623, 0).saturating_mul(r.into())) + // Minimum execution time: 18_597_000 picoseconds. + Weight::from_parts(19_774_947, 3507) + // Standard Error: 5_735 + .saturating_add(Weight::from_parts(339_013, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -282,10 +282,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 37_085_000 picoseconds. - Weight::from_parts(40_627_931, 3519) - // Standard Error: 23_398 - .saturating_add(Weight::from_parts(847_496, 0).saturating_mul(r.into())) + // Minimum execution time: 29_670_000 picoseconds. + Weight::from_parts(33_022_564, 3519) + // Standard Error: 28_521 + .saturating_add(Weight::from_parts(817_563, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -303,8 +303,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 49_474_000 picoseconds. - Weight::from_parts(50_506_000, 219984) + // Minimum execution time: 42_072_000 picoseconds. 
+ Weight::from_parts(43_360_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -319,10 +319,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(23_414_747, 3795) - // Standard Error: 2_751 - .saturating_add(Weight::from_parts(1_314_498, 0).saturating_mul(n.into())) + // Minimum execution time: 14_338_000 picoseconds. + Weight::from_parts(18_144_424, 3795) + // Standard Error: 2_482 + .saturating_add(Weight::from_parts(1_200_576, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -346,8 +346,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `625` // Estimated: `19894` - // Minimum execution time: 79_257_000 picoseconds. - Weight::from_parts(81_293_000, 19894) + // Minimum execution time: 73_317_000 picoseconds. 
+ Weight::from_parts(75_103_000, 19894) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 4f3a734d9868..44335e8f575c 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index d8f3c33fbea9..f8622880538e 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -156,10 +156,7 @@ use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{ - BlockNumberProvider, CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, - StaticLookup, - }, + traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup}, RuntimeDebug, }; @@ -181,12 +178,11 @@ mod mock; mod tests; pub mod weights; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type BlockNumberFromProviderOf = - <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + type FriendsOf = BoundedVec<::AccountId, ::MaxFriends>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An active recovery process. 
#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -194,7 +190,7 @@ pub struct ActiveRecovery { /// The block number when the recovery process started. created: BlockNumber, /// The amount held in reserve of the `depositor`, - /// to be returned once this recovery process is closed. + /// To be returned once this recovery process is closed. deposit: Balance, /// The friends which have vouched so far. Always sorted. friends: Friends, @@ -240,9 +236,6 @@ pub mod pallet { + GetDispatchInfo + From>; - /// Provider for the block number. Normally this is the `frame_system` pallet. - type BlockNumberProvider: BlockNumberProvider; - /// The currency mechanism. type Currency: ReservableCurrency; @@ -346,7 +339,7 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - RecoveryConfig, BalanceOf, FriendsOf>, + RecoveryConfig, BalanceOf, FriendsOf>, >; /// Active recovery attempts. @@ -361,7 +354,7 @@ pub mod pallet { T::AccountId, Twox64Concat, T::AccountId, - ActiveRecovery, BalanceOf, FriendsOf>, + ActiveRecovery, BalanceOf, FriendsOf>, >; /// The list of allowed proxy accounts. 
@@ -452,7 +445,7 @@ pub mod pallet { origin: OriginFor, friends: Vec, threshold: u16, - delay_period: BlockNumberFromProviderOf, + delay_period: BlockNumberFor, ) -> DispatchResult { let who = ensure_signed(origin)?; // Check account is not already set up for recovery @@ -518,7 +511,7 @@ pub mod pallet { T::Currency::reserve(&who, recovery_deposit)?; // Create an active recovery status let recovery_status = ActiveRecovery { - created: T::BlockNumberProvider::current_block_number(), + created: >::block_number(), deposit: recovery_deposit, friends: Default::default(), }; @@ -603,7 +596,7 @@ pub mod pallet { Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); // Make sure the delay period has passed - let current_block_number = T::BlockNumberProvider::current_block_number(); + let current_block_number = >::block_number(); let recoverable_block_number = active_recovery .created .checked_add(&recovery_config.delay_period) diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index 3930db82d6c7..8e30cbe997e1 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -66,7 +66,6 @@ impl Config for Test { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type RuntimeCall = RuntimeCall; - type BlockNumberProvider = System; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; diff --git a/substrate/frame/recovery/src/weights.rs b/substrate/frame/recovery/src/weights.rs index 38b085f0a293..e38ad0461afd 100644 --- a/substrate/frame/recovery/src/weights.rs +++ b/substrate/frame/recovery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_recovery` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -73,10 +73,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `530` + // Measured: `497` // Estimated: `3997` - // Minimum execution time: 21_063_000 picoseconds. - Weight::from_parts(21_784_000, 3997) + // Minimum execution time: 15_318_000 picoseconds. + Weight::from_parts(15_767_000, 3997) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Recovery::Proxy` (r:0 w:1) @@ -85,8 +85,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_653_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 7_153_000 picoseconds. + Weight::from_parts(7_578_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Recovery::Recoverable` (r:1 w:1) @@ -94,12 +94,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 27_992_000 picoseconds. - Weight::from_parts(29_149_096, 3816) - // Standard Error: 5_733 - .saturating_add(Weight::from_parts(87_755, 0).saturating_mul(n.into())) + // Minimum execution time: 23_303_000 picoseconds. 
+ Weight::from_parts(24_725_158, 3816) + // Standard Error: 5_723 + .saturating_add(Weight::from_parts(13_638, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,10 +109,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `376` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 32_675_000 picoseconds. - Weight::from_parts(34_217_000, 3854) + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_041_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -123,12 +123,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `464 + n * (64 ±0)` + // Measured: `431 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 23_557_000 picoseconds. - Weight::from_parts(24_517_150, 3854) - // Standard Error: 5_550 - .saturating_add(Weight::from_parts(156_378, 0).saturating_mul(n.into())) + // Minimum execution time: 17_695_000 picoseconds. + Weight::from_parts(18_591_642, 3854) + // Standard Error: 5_582 + .saturating_add(Weight::from_parts(188_668, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -141,12 +141,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `496 + n * (64 ±0)` + // Measured: `463 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 28_261_000 picoseconds. 
- Weight::from_parts(29_298_729, 3854) - // Standard Error: 5_392 - .saturating_add(Weight::from_parts(162_096, 0).saturating_mul(n.into())) + // Minimum execution time: 22_580_000 picoseconds. + Weight::from_parts(23_526_020, 3854) + // Standard Error: 6_604 + .saturating_add(Weight::from_parts(134_340, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -157,12 +157,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `617 + n * (32 ±0)` + // Measured: `584 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 38_953_000 picoseconds. - Weight::from_parts(40_675_824, 3854) - // Standard Error: 6_163 - .saturating_add(Weight::from_parts(144_246, 0).saturating_mul(n.into())) + // Minimum execution time: 32_017_000 picoseconds. + Weight::from_parts(33_401_086, 3854) + // Standard Error: 6_498 + .saturating_add(Weight::from_parts(95_507, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -173,12 +173,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `374 + n * (32 ±0)` + // Measured: `341 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 32_735_000 picoseconds. - Weight::from_parts(33_830_787, 3854) - // Standard Error: 7_758 - .saturating_add(Weight::from_parts(194_601, 0).saturating_mul(n.into())) + // Minimum execution time: 28_641_000 picoseconds. 
+ Weight::from_parts(30_230_511, 3854) + // Standard Error: 7_058 + .saturating_add(Weight::from_parts(61_004, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,10 +186,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `385` + // Measured: `352` // Estimated: `3545` - // Minimum execution time: 17_356_000 picoseconds. - Weight::from_parts(18_101_000, 3545) + // Minimum execution time: 11_767_000 picoseconds. + Weight::from_parts(12_275_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,10 +205,10 @@ impl WeightInfo for () { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `530` + // Measured: `497` // Estimated: `3997` - // Minimum execution time: 21_063_000 picoseconds. - Weight::from_parts(21_784_000, 3997) + // Minimum execution time: 15_318_000 picoseconds. + Weight::from_parts(15_767_000, 3997) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Recovery::Proxy` (r:0 w:1) @@ -217,8 +217,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_653_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 7_153_000 picoseconds. + Weight::from_parts(7_578_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Recovery::Recoverable` (r:1 w:1) @@ -226,12 +226,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. 
fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 27_992_000 picoseconds. - Weight::from_parts(29_149_096, 3816) - // Standard Error: 5_733 - .saturating_add(Weight::from_parts(87_755, 0).saturating_mul(n.into())) + // Minimum execution time: 23_303_000 picoseconds. + Weight::from_parts(24_725_158, 3816) + // Standard Error: 5_723 + .saturating_add(Weight::from_parts(13_638, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -241,10 +241,10 @@ impl WeightInfo for () { /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `376` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 32_675_000 picoseconds. - Weight::from_parts(34_217_000, 3854) + // Minimum execution time: 26_914_000 picoseconds. + Weight::from_parts(28_041_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -255,12 +255,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `464 + n * (64 ±0)` + // Measured: `431 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 23_557_000 picoseconds. - Weight::from_parts(24_517_150, 3854) - // Standard Error: 5_550 - .saturating_add(Weight::from_parts(156_378, 0).saturating_mul(n.into())) + // Minimum execution time: 17_695_000 picoseconds. 
+ Weight::from_parts(18_591_642, 3854) + // Standard Error: 5_582 + .saturating_add(Weight::from_parts(188_668, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -273,12 +273,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `496 + n * (64 ±0)` + // Measured: `463 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 28_261_000 picoseconds. - Weight::from_parts(29_298_729, 3854) - // Standard Error: 5_392 - .saturating_add(Weight::from_parts(162_096, 0).saturating_mul(n.into())) + // Minimum execution time: 22_580_000 picoseconds. + Weight::from_parts(23_526_020, 3854) + // Standard Error: 6_604 + .saturating_add(Weight::from_parts(134_340, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -289,12 +289,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `617 + n * (32 ±0)` + // Measured: `584 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 38_953_000 picoseconds. - Weight::from_parts(40_675_824, 3854) - // Standard Error: 6_163 - .saturating_add(Weight::from_parts(144_246, 0).saturating_mul(n.into())) + // Minimum execution time: 32_017_000 picoseconds. + Weight::from_parts(33_401_086, 3854) + // Standard Error: 6_498 + .saturating_add(Weight::from_parts(95_507, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -305,12 +305,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. 
fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `374 + n * (32 ±0)` + // Measured: `341 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 32_735_000 picoseconds. - Weight::from_parts(33_830_787, 3854) - // Standard Error: 7_758 - .saturating_add(Weight::from_parts(194_601, 0).saturating_mul(n.into())) + // Minimum execution time: 28_641_000 picoseconds. + Weight::from_parts(30_230_511, 3854) + // Standard Error: 7_058 + .saturating_add(Weight::from_parts(61_004, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -318,10 +318,10 @@ impl WeightInfo for () { /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `385` + // Measured: `352` // Estimated: `3545` - // Minimum execution time: 17_356_000 picoseconds. - Weight::from_parts(18_101_000, 3545) + // Minimum execution time: 11_767_000 picoseconds. 
+ Weight::from_parts(12_275_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 0f35dc74382e..32dba3436595 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -20,15 +20,15 @@ assert_matches = { optional = true, workspace = true } codec = { features = [ "derive", ], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-arithmetic = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +log = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } @@ -57,6 +57,7 @@ std = [ ] runtime-benchmarks = [ "assert_matches", + "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/substrate/frame/referenda/src/weights.rs b/substrate/frame/referenda/src/weights.rs index 7c94b2b1799f..b34758ee4667 100644 --- a/substrate/frame/referenda/src/weights.rs +++ b/substrate/frame/referenda/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_referenda` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -96,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `110487` - // Minimum execution time: 38_152_000 picoseconds. - Weight::from_parts(39_632_000, 110487) + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_217_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 52_369_000 picoseconds. - Weight::from_parts(55_689_000, 219984) + // Minimum execution time: 45_276_000 picoseconds. + Weight::from_parts(46_903_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -130,8 +130,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 68_807_000 picoseconds. - Weight::from_parts(71_917_000, 110487) + // Minimum execution time: 63_832_000 picoseconds. + Weight::from_parts(65_616_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -149,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_971_000 picoseconds. - Weight::from_parts(71_317_000, 110487) + // Minimum execution time: 63_726_000 picoseconds. 
+ Weight::from_parts(64_909_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -166,8 +166,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 59_447_000 picoseconds. - Weight::from_parts(61_121_000, 219984) + // Minimum execution time: 53_001_000 picoseconds. + Weight::from_parts(54_489_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -183,8 +183,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_243_000 picoseconds. - Weight::from_parts(59_671_000, 219984) + // Minimum execution time: 51_021_000 picoseconds. + Weight::from_parts(53_006_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -194,8 +194,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `417` // Estimated: `3831` - // Minimum execution time: 31_621_000 picoseconds. - Weight::from_parts(32_628_000, 3831) + // Minimum execution time: 26_572_000 picoseconds. + Weight::from_parts(27_534_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,8 +205,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `407` // Estimated: `3831` - // Minimum execution time: 32_483_000 picoseconds. - Weight::from_parts(33_427_000, 3831) + // Minimum execution time: 26_897_000 picoseconds. + Weight::from_parts(27_883_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_283_000 picoseconds. 
- Weight::from_parts(37_748_000, 219984) + // Minimum execution time: 31_767_000 picoseconds. + Weight::from_parts(33_045_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -237,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `688` // Estimated: `219984` - // Minimum execution time: 75_460_000 picoseconds. - Weight::from_parts(77_956_000, 219984) + // Minimum execution time: 67_798_000 picoseconds. + Weight::from_parts(70_044_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -250,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `240` // Estimated: `5477` - // Minimum execution time: 15_139_000 picoseconds. - Weight::from_parts(15_651_000, 5477) + // Minimum execution time: 10_056_000 picoseconds. + Weight::from_parts(10_460_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 48_590_000 picoseconds. - Weight::from_parts(50_207_000, 110487) + // Minimum execution time: 44_293_000 picoseconds. + Weight::from_parts(45_784_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -280,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 48_555_000 picoseconds. - Weight::from_parts(49_956_000, 110487) + // Minimum execution time: 45_642_000 picoseconds. 
+ Weight::from_parts(47_252_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -293,8 +293,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 28_326_000 picoseconds. - Weight::from_parts(29_735_000, 5477) + // Minimum execution time: 22_096_000 picoseconds. + Weight::from_parts(22_496_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -306,8 +306,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 28_209_000 picoseconds. - Weight::from_parts(29_375_000, 5477) + // Minimum execution time: 21_931_000 picoseconds. + Weight::from_parts(22_312_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -321,8 +321,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 33_973_000 picoseconds. - Weight::from_parts(35_732_000, 5477) + // Minimum execution time: 28_890_000 picoseconds. + Weight::from_parts(29_679_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -336,8 +336,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 34_112_000 picoseconds. - Weight::from_parts(35_748_000, 5477) + // Minimum execution time: 28_875_000 picoseconds. + Weight::from_parts(29_492_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -349,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `110487` - // Minimum execution time: 26_135_000 picoseconds. 
- Weight::from_parts(27_080_000, 110487) + // Minimum execution time: 19_787_000 picoseconds. + Weight::from_parts(20_493_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -362,8 +362,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 26_494_000 picoseconds. - Weight::from_parts(27_290_000, 110487) + // Minimum execution time: 19_987_000 picoseconds. + Weight::from_parts(20_860_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -373,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3831` - // Minimum execution time: 15_294_000 picoseconds. - Weight::from_parts(15_761_000, 3831) + // Minimum execution time: 13_416_000 picoseconds. + Weight::from_parts(13_857_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -388,8 +388,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_360_000 picoseconds. - Weight::from_parts(33_747_000, 110487) + // Minimum execution time: 27_199_000 picoseconds. + Weight::from_parts(28_562_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -403,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 34_133_000 picoseconds. - Weight::from_parts(35_784_000, 110487) + // Minimum execution time: 29_205_000 picoseconds. 
+ Weight::from_parts(30_407_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -416,8 +416,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_009_000 picoseconds. - Weight::from_parts(30_985_000, 110487) + // Minimum execution time: 24_136_000 picoseconds. + Weight::from_parts(24_868_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -429,8 +429,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_439_000 picoseconds. - Weight::from_parts(30_386_000, 110487) + // Minimum execution time: 23_860_000 picoseconds. + Weight::from_parts(24_556_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -442,8 +442,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_293_000 picoseconds. - Weight::from_parts(30_577_000, 110487) + // Minimum execution time: 23_409_000 picoseconds. + Weight::from_parts(24_354_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -455,8 +455,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `504` // Estimated: `110487` - // Minimum execution time: 27_418_000 picoseconds. - Weight::from_parts(28_718_000, 110487) + // Minimum execution time: 21_947_000 picoseconds. 
+ Weight::from_parts(22_485_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -470,8 +470,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `504` // Estimated: `219984` - // Minimum execution time: 40_020_000 picoseconds. - Weight::from_parts(40_861_000, 219984) + // Minimum execution time: 34_643_000 picoseconds. + Weight::from_parts(36_193_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -483,8 +483,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_843_000 picoseconds. - Weight::from_parts(30_764_000, 110487) + // Minimum execution time: 24_097_000 picoseconds. + Weight::from_parts(24_881_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -498,10 +498,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `450` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_498_000, 3831) + // Minimum execution time: 19_947_000 picoseconds. + Weight::from_parts(20_396_000, 3831) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -513,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `421` // Estimated: `3831` - // Minimum execution time: 20_867_000 picoseconds. - Weight::from_parts(21_803_000, 3831) + // Minimum execution time: 15_516_000 picoseconds. 
+ Weight::from_parts(16_094_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -532,8 +532,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `286` // Estimated: `110487` - // Minimum execution time: 38_152_000 picoseconds. - Weight::from_parts(39_632_000, 110487) + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_217_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -547,8 +547,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 52_369_000 picoseconds. - Weight::from_parts(55_689_000, 219984) + // Minimum execution time: 45_276_000 picoseconds. + Weight::from_parts(46_903_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -566,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 68_807_000 picoseconds. - Weight::from_parts(71_917_000, 110487) + // Minimum execution time: 63_832_000 picoseconds. + Weight::from_parts(65_616_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -585,8 +585,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_971_000 picoseconds. - Weight::from_parts(71_317_000, 110487) + // Minimum execution time: 63_726_000 picoseconds. + Weight::from_parts(64_909_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -602,8 +602,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 59_447_000 picoseconds. 
- Weight::from_parts(61_121_000, 219984) + // Minimum execution time: 53_001_000 picoseconds. + Weight::from_parts(54_489_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -619,8 +619,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_243_000 picoseconds. - Weight::from_parts(59_671_000, 219984) + // Minimum execution time: 51_021_000 picoseconds. + Weight::from_parts(53_006_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -630,8 +630,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `417` // Estimated: `3831` - // Minimum execution time: 31_621_000 picoseconds. - Weight::from_parts(32_628_000, 3831) + // Minimum execution time: 26_572_000 picoseconds. + Weight::from_parts(27_534_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -641,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `407` // Estimated: `3831` - // Minimum execution time: 32_483_000 picoseconds. - Weight::from_parts(33_427_000, 3831) + // Minimum execution time: 26_897_000 picoseconds. + Weight::from_parts(27_883_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -656,8 +656,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_283_000 picoseconds. - Weight::from_parts(37_748_000, 219984) + // Minimum execution time: 31_767_000 picoseconds. 
+ Weight::from_parts(33_045_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -673,8 +673,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `688` // Estimated: `219984` - // Minimum execution time: 75_460_000 picoseconds. - Weight::from_parts(77_956_000, 219984) + // Minimum execution time: 67_798_000 picoseconds. + Weight::from_parts(70_044_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -686,8 +686,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `240` // Estimated: `5477` - // Minimum execution time: 15_139_000 picoseconds. - Weight::from_parts(15_651_000, 5477) + // Minimum execution time: 10_056_000 picoseconds. + Weight::from_parts(10_460_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -701,8 +701,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 48_590_000 picoseconds. - Weight::from_parts(50_207_000, 110487) + // Minimum execution time: 44_293_000 picoseconds. + Weight::from_parts(45_784_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -716,8 +716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 48_555_000 picoseconds. - Weight::from_parts(49_956_000, 110487) + // Minimum execution time: 45_642_000 picoseconds. + Weight::from_parts(47_252_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -729,8 +729,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 28_326_000 picoseconds. 
- Weight::from_parts(29_735_000, 5477) + // Minimum execution time: 22_096_000 picoseconds. + Weight::from_parts(22_496_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -742,8 +742,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 28_209_000 picoseconds. - Weight::from_parts(29_375_000, 5477) + // Minimum execution time: 21_931_000 picoseconds. + Weight::from_parts(22_312_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -757,8 +757,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 33_973_000 picoseconds. - Weight::from_parts(35_732_000, 5477) + // Minimum execution time: 28_890_000 picoseconds. + Weight::from_parts(29_679_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -772,8 +772,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 34_112_000 picoseconds. - Weight::from_parts(35_748_000, 5477) + // Minimum execution time: 28_875_000 picoseconds. + Weight::from_parts(29_492_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -785,8 +785,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `110487` - // Minimum execution time: 26_135_000 picoseconds. - Weight::from_parts(27_080_000, 110487) + // Minimum execution time: 19_787_000 picoseconds. 
+ Weight::from_parts(20_493_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -798,8 +798,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 26_494_000 picoseconds. - Weight::from_parts(27_290_000, 110487) + // Minimum execution time: 19_987_000 picoseconds. + Weight::from_parts(20_860_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -809,8 +809,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3831` - // Minimum execution time: 15_294_000 picoseconds. - Weight::from_parts(15_761_000, 3831) + // Minimum execution time: 13_416_000 picoseconds. + Weight::from_parts(13_857_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -824,8 +824,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_360_000 picoseconds. - Weight::from_parts(33_747_000, 110487) + // Minimum execution time: 27_199_000 picoseconds. + Weight::from_parts(28_562_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -839,8 +839,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 34_133_000 picoseconds. - Weight::from_parts(35_784_000, 110487) + // Minimum execution time: 29_205_000 picoseconds. + Weight::from_parts(30_407_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -852,8 +852,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_009_000 picoseconds. 
- Weight::from_parts(30_985_000, 110487) + // Minimum execution time: 24_136_000 picoseconds. + Weight::from_parts(24_868_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -865,8 +865,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_439_000 picoseconds. - Weight::from_parts(30_386_000, 110487) + // Minimum execution time: 23_860_000 picoseconds. + Weight::from_parts(24_556_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -878,8 +878,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_293_000 picoseconds. - Weight::from_parts(30_577_000, 110487) + // Minimum execution time: 23_409_000 picoseconds. + Weight::from_parts(24_354_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -891,8 +891,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `504` // Estimated: `110487` - // Minimum execution time: 27_418_000 picoseconds. - Weight::from_parts(28_718_000, 110487) + // Minimum execution time: 21_947_000 picoseconds. + Weight::from_parts(22_485_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -906,8 +906,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `504` // Estimated: `219984` - // Minimum execution time: 40_020_000 picoseconds. - Weight::from_parts(40_861_000, 219984) + // Minimum execution time: 34_643_000 picoseconds. 
+ Weight::from_parts(36_193_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -919,8 +919,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_843_000 picoseconds. - Weight::from_parts(30_764_000, 110487) + // Minimum execution time: 24_097_000 picoseconds. + Weight::from_parts(24_881_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -934,10 +934,10 @@ impl WeightInfo for () { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `450` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_498_000, 3831) + // Minimum execution time: 19_947_000 picoseconds. + Weight::from_parts(20_396_000, 3831) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -949,8 +949,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `421` // Estimated: `3831` - // Minimum execution time: 20_867_000 picoseconds. - Weight::from_parts(21_803_000, 3831) + // Minimum execution time: 15_516_000 picoseconds. 
+ Weight::from_parts(16_094_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index a40b577b52ea..487bada593cd 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/remark/src/weights.rs b/substrate/frame/remark/src/weights.rs index 26838f74a319..8a8bdef6dd0f 100644 --- a/substrate/frame/remark/src/weights.rs +++ b/substrate/frame/remark/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_remark` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -62,10 +62,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_242_000 picoseconds. 
- Weight::from_parts(15_241_545, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(6_793_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_364, 0).saturating_mul(l.into())) } } @@ -76,9 +76,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_242_000 picoseconds. - Weight::from_parts(15_241_545, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(6_793_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_364, 0).saturating_mul(l.into())) } } diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index fa008f8e836a..81fbbc8cf38e 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -17,29 +17,32 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive", "max-encoded-len"], workspace = true } -derive_more = { workspace = true } environmental = { workspace = true } -ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] } -hex = { workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.18.0", default-features = false } -rlp = { workspace = true } +polkavm = { version = "0.13.0", default-features = false } +bitflags = { workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } +log = { workspace = true } serde = { features = [ "alloc", "derive", ], workspace = true, default-features = false } +impl-trait-for-tuples = { workspace = true } +rlp = { workspace = true } +derive_more = { workspace = true } +hex = { workspace = true } +jsonrpsee = { workspace = true, 
features = ["full"], optional = true } +ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] } # Polkadot SDK Dependencies frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-revive-fixtures = { workspace = true, optional = true } -pallet-revive-proc-macro = { workspace = true } -pallet-revive-uapi = { workspace = true, features = ["scale"] } +pallet-balances = { optional = true, workspace = true } +pallet-revive-fixtures = { workspace = true, default-features = false, optional = true } +pallet-revive-uapi = { workspace = true, default-features = true } +pallet-revive-proc-macro = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } @@ -47,26 +50,29 @@ sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-weights = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } subxt-signer = { workspace = true, optional = true, features = [ "unstable-eth", ] } -xcm = { workspace = true } -xcm-builder = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } -hex-literal = { workspace = true } pretty_assertions = { workspace = true } +pallet-revive-fixtures = { workspace = true, default-features = true } secp256k1 = { workspace = true, features = ["recovery"] } serde_json = { workspace = true } +hex-literal = { workspace = true } # Polkadot SDK Dependencies pallet-balances = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } -pallet-revive-fixtures = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, 
default-features = true } pallet-utility = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } xcm-builder = { workspace = true, default-features = true } @@ -81,7 +87,9 @@ std = [ "frame-support/std", "frame-system/std", "hex/std", + "jsonrpsee", "log/std", + "pallet-balances?/std", "pallet-proxy/std", "pallet-revive-fixtures?/std", "pallet-timestamp/std", @@ -100,6 +108,7 @@ std = [ "sp-keystore/std", "sp-runtime/std", "sp-std/std", + "sp-weights/std", "subxt-signer", "xcm-builder/std", "xcm/std", @@ -108,7 +117,9 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-revive-fixtures", "pallet-timestamp/runtime-benchmarks", @@ -116,12 +127,13 @@ runtime-benchmarks = [ "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", + "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/substrate/frame/revive/README.md b/substrate/frame/revive/README.md index 575920dfaac7..5352e636c252 100644 --- a/substrate/frame/revive/README.md +++ b/substrate/frame/revive/README.md @@ -92,7 +92,7 @@ Driven by the desire to have an iterative approach in developing new contract in concept of an unstable interface. 
Akin to the rust nightly compiler it allows us to add new interfaces but mark them as unstable so that contract languages can experiment with them and give feedback before we stabilize those. -In order to access interfaces which don't have a stable `#[stable]` in [`runtime.rs`](src/wasm/runtime.rs) +In order to access interfaces which don't have a stable `#[api_version(x)]` in [`runtime.rs`](src/wasm/runtime.rs) one need to set `pallet_revive::Config::UnsafeUnstableInterface` to `ConstU32`. **It should be obvious that any production runtime should never be compiled with this feature: In addition to be subject to change or removal those interfaces might not have proper weights associated with them and are therefore diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index e17bc88a3847..7a5452853d65 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -5,26 +5,26 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Fixtures for testing and benchmarking" -homepage.workspace = true -repository.workspace = true - -[package.metadata.polkadot-sdk] -exclude-from-umbrella = true [lints] workspace = true [dependencies] -anyhow = { workspace = true, default-features = true, optional = true } +frame-system = { workspace = true, default-features = true, optional = true } sp-core = { workspace = true, default-features = true, optional = true } sp-io = { workspace = true, default-features = true, optional = true } +sp-runtime = { workspace = true, default-features = true, optional = true } +anyhow = { workspace = true, default-features = true, optional = true } +log = { workspace = true } [build-dependencies] -anyhow = { workspace = true, default-features = true } -polkavm-linker = { version = "0.18.0" } +parity-wasm = { workspace = true } +tempfile = { workspace = true } toml = { workspace = true } +polkavm-linker = { version = "0.14.0" 
} +anyhow = { workspace = true, default-features = true } [features] default = ["std"] # only when std is enabled all fixtures are available -std = ["anyhow", "sp-core", "sp-io"] +std = ["anyhow", "frame-system", "log/std", "sp-core", "sp-io", "sp-runtime"] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index eca547bc6ddd..a5b23e58c0d6 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -20,8 +20,7 @@ use anyhow::Result; use anyhow::{bail, Context}; use std::{ - env, fs, - io::Write, + cfg, env, fs, path::{Path, PathBuf}, process::Command, }; @@ -83,7 +82,7 @@ fn create_cargo_toml<'a>( entries: impl Iterator, output_dir: &Path, ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; let mut set_dep = |name, path| -> Result<()> { cargo_toml["dependencies"][name]["path"] = toml::Value::String( fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), @@ -107,26 +106,21 @@ fn create_cargo_toml<'a>( ); let cargo_toml = toml::to_string_pretty(&cargo_toml)?; - fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) - .with_context(|| format!("Failed to write {cargo_toml:?}"))?; - fs::copy( - fixtures_dir.join("build/_rust-toolchain.toml"), - output_dir.join("rust-toolchain.toml"), - ) - .context("Failed to write toolchain file")?; - Ok(()) + fs::write(output_dir.join("Cargo.toml"), cargo_toml).map_err(Into::into) } -fn invoke_build(current_dir: &Path) -> Result<()> { +fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { let encoded_rustflags = ["-Dwarnings"].join("\x1f"); - let mut build_command = Command::new("cargo"); + let mut build_command = Command::new(env::var("CARGO")?); build_command .current_dir(current_dir) .env_clear() .env("PATH", env::var("PATH").unwrap_or_default()) 
.env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) + .env("RUSTC_BOOTSTRAP", "1") .env("RUSTUP_HOME", env::var("RUSTUP_HOME").unwrap_or_default()) + .env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default()) .args([ "build", "--release", @@ -134,7 +128,7 @@ fn invoke_build(current_dir: &Path) -> Result<()> { "-Zbuild-std-features=panic_immediate_abort", ]) .arg("--target") - .arg(polkavm_linker::target_json_64_path().unwrap()); + .arg(target); if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { build_command.env("RUSTUP_TOOLCHAIN", &toolchain); @@ -160,11 +154,10 @@ fn post_process(input_path: &Path, output_path: &Path) -> Result<()> { let mut config = polkavm_linker::Config::default(); config.set_strip(strip); config.set_optimize(optimize); - let orig = fs::read(input_path).with_context(|| format!("Failed to read {input_path:?}"))?; + let orig = fs::read(input_path).with_context(|| format!("Failed to read {:?}", input_path))?; let linked = polkavm_linker::program_from_elf(config, orig.as_ref()) .map_err(|err| anyhow::format_err!("Failed to link polkavm program: {}", err))?; - fs::write(output_path, linked).with_context(|| format!("Failed to write {output_path:?}"))?; - Ok(()) + fs::write(output_path, linked).map_err(Into::into) } /// Write the compiled contracts to the given output directory. 
@@ -172,7 +165,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result for entry in entries { post_process( &build_dir - .join("target/riscv64emac-unknown-none-polkavm/release") + .join("target/riscv32emac-unknown-none-polkavm/release") .join(entry.name()), &out_dir.join(entry.out_filename()), )?; @@ -181,66 +174,11 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result Ok(()) } -/// Create a directory in the `target` as output directory -fn create_out_dir() -> Result { - let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); - - // this is set in case the user has overriden the target directory - let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { - path.into() - } else { - // otherwise just traverse up from the out dir - let mut out_dir: PathBuf = temp_dir.clone(); - loop { - if !out_dir.pop() { - bail!("Cannot find project root.") - } - if out_dir.join("Cargo.lock").exists() { - break; - } - } - out_dir.join("target") - } - .join("pallet-revive-fixtures"); - - // clean up some leftover symlink from previous versions of this script - let mut out_exists = out_dir.exists(); - if out_exists && !out_dir.is_dir() { - fs::remove_file(&out_dir)?; - out_exists = false; - } - - if !out_exists { - fs::create_dir(&out_dir).context("Failed to create output directory")?; - } - - // write the location of the out dir so it can be found later - let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) - .context("Failed to create fixture_location.rs")?; - write!( - file, - r#" - #[allow(dead_code)] - const FIXTURE_DIR: &str = "{0}"; - macro_rules! 
fixture {{ - ($name: literal) => {{ - include_bytes!(concat!("{0}", "/", $name, ".polkavm")) - }}; - }} - "#, - out_dir.display() - ) - .context("Failed to write to fixture_location.rs")?; - - Ok(out_dir) -} - pub fn main() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let out_dir = create_out_dir().context("Cannot determine output directory")?; - let build_dir = out_dir.join("build"); - fs::create_dir_all(&build_dir).context("Failed to create build directory")?; + let out_dir: PathBuf = env::var("OUT_DIR")?.into(); + let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json"); println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); @@ -258,9 +196,23 @@ pub fn main() -> Result<()> { return Ok(()) } - create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; - invoke_build(&build_dir)?; - write_output(&build_dir, &out_dir, entries)?; + let tmp_dir = tempfile::tempdir()?; + let tmp_dir_path = tmp_dir.path(); + + create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; + invoke_build(&target, tmp_dir_path)?; + + write_output(tmp_dir_path, &out_dir, entries)?; + + #[cfg(unix)] + if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { + let symlink_dir: PathBuf = symlink_dir.into(); + let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); + if symlink_dir.is_symlink() { + fs::remove_file(&symlink_dir)? 
+ } + std::os::unix::fs::symlink(&out_dir, &symlink_dir)?; + } Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/_Cargo.toml b/substrate/frame/revive/fixtures/build/Cargo.toml similarity index 62% rename from substrate/frame/revive/fixtures/build/_Cargo.toml rename to substrate/frame/revive/fixtures/build/Cargo.toml index bfb9aaedd6f5..5d0e256e2e73 100644 --- a/substrate/frame/revive/fixtures/build/_Cargo.toml +++ b/substrate/frame/revive/fixtures/build/Cargo.toml @@ -4,17 +4,14 @@ publish = false version = "1.0.0" edition = "2021" -# Make sure this is not included into the workspace -[workspace] - # Binary targets are injected dynamically by the build script. [[bin]] # All paths are injected dynamically by the build script. [dependencies] -uapi = { package = 'pallet-revive-uapi', path = "", features = ["unstable-hostfn"], default-features = false } +uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.18.0" } +polkavm-derive = { version = "0.14.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml deleted file mode 100644 index 4c757c708d58..000000000000 --- a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml +++ /dev/null @@ -1,4 +0,0 @@ -[toolchain] -channel = "nightly-2024-11-19" -components = ["rust-src"] -profile = "minimal" diff --git a/substrate/frame/revive/fixtures/contracts/base_fee.rs b/substrate/frame/revive/fixtures/contracts/base_fee.rs deleted file mode 100644 index 157909463ee4..000000000000 --- a/substrate/frame/revive/fixtures/contracts/base_fee.rs +++ /dev/null @@ -1,36 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Returns the base fee back to the caller. - -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - let mut buf = [0; 32]; - api::base_fee(&mut buf); - api::return_value(ReturnFlags::empty(), &buf); -} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_copy.rs b/substrate/frame/revive/fixtures/contracts/call_data_copy.rs deleted file mode 100644 index ccf1664058e8..000000000000 --- a/substrate/frame/revive/fixtures/contracts/call_data_copy.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Expects a call data of [0xFF; 32] and executes the test vectors from -//! [https://www.evm.codes/?fork=cancun#37] and some additional tests. - -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api}; - -const TEST_DATA: [u8; 32] = [ - 255, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -]; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - let mut buf = [0; 32]; - - api::call_data_copy(&mut &mut buf[..], 0); - assert_eq!(buf, [255; 32]); - - api::call_data_copy(&mut &mut buf[..8], 31); - assert_eq!(buf, TEST_DATA); - - api::call_data_copy(&mut &mut buf[..], 32); - assert_eq!(buf, [0; 32]); - - let mut buf = [255; 32]; - api::call_data_copy(&mut &mut buf[..], u32::MAX); - assert_eq!(buf, [0; 32]); -} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_load.rs b/substrate/frame/revive/fixtures/contracts/call_data_load.rs deleted file mode 100644 index d3df9433f5d1..000000000000 --- a/substrate/frame/revive/fixtures/contracts/call_data_load.rs +++ /dev/null @@ -1,44 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! This uses the call data load API to first the first input byte. -//! This single input byte is used as the offset for a second call -//! to the call data load API. -//! The output of the second API call is returned. - -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - let mut buf = [0; 32]; - api::call_data_load(&mut buf, 0); - - let offset = buf[31] as u32; - let mut buf = [0; 32]; - api::call_data_load(&mut buf, offset); - - api::return_value(ReturnFlags::empty(), &buf); -} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_size.rs b/substrate/frame/revive/fixtures/contracts/call_data_size.rs deleted file mode 100644 index 7caf18d440b8..000000000000 --- a/substrate/frame/revive/fixtures/contracts/call_data_size.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Returns the call data size back to the caller. 
- -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - api::return_value(ReturnFlags::empty(), &api::call_data_size().to_le_bytes()); -} diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs index edad43fae251..f9a30b87df47 100644 --- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs @@ -65,7 +65,7 @@ pub extern "C" fn call() { None, Some(&salt), ); - assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); + assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to deploy the contract due to insufficient proof_size weight. let res = api::instantiate( @@ -79,7 +79,7 @@ pub extern "C" fn call() { None, Some(&salt), ); - assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); + assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Deploy the contract successfully. let mut callee = [0u8; 20]; @@ -121,7 +121,7 @@ pub extern "C" fn call() { &input, None, ); - assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); + assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to call the contract due to insufficient proof_size weight. let res = api::call( @@ -134,7 +134,7 @@ pub extern "C" fn call() { &input, None, ); - assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); + assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Call the contract successfully. 
let mut output = [0u8; 4]; diff --git a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs index 302608ccf87c..abfba282bec1 100644 --- a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs +++ b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs @@ -121,9 +121,8 @@ macro_rules! input { // e.g input!(buffer, 512, var1: u32, var2: [u8], ); ($buffer:ident, $size:expr, $($rest:tt)*) => { let mut $buffer = [0u8; $size]; - let input_size = $crate::api::call_data_size(); - let $buffer = &mut &mut $buffer[..$size.min(input_size as usize)]; - $crate::api::call_data_copy($buffer, 0); + let $buffer = &mut &mut $buffer[..]; + $crate::api::input($buffer); input!(@inner $buffer, 0, $($rest)*); }; diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs index a12c36af856a..4fa2db0c8c1c 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs @@ -40,7 +40,7 @@ pub extern "C" fn call() { api::set_storage(StorageFlags::empty(), buffer, &[1u8; 4]); // Call the callee - let ret = api::call( + api::call( uapi::CallFlags::empty(), callee, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -49,10 +49,8 @@ pub extern "C" fn call() { &[0u8; 32], // Value transferred to the contract. 
input, None, - ); - if let Err(code) = ret { - api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); - }; + ) + .unwrap(); // create 8 byte of storage after calling // item of 12 bytes because we override 4 bytes diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs index ecc0fc79e6fd..463706457a15 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs @@ -39,7 +39,7 @@ pub extern "C" fn call() { let salt = [0u8; 32]; let mut address = [0u8; 20]; - let ret = api::instantiate( + api::instantiate( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. @@ -49,10 +49,8 @@ pub extern "C" fn call() { Some(&mut address), None, Some(&salt), - ); - if let Err(code) = ret { - api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); - }; + ) + .unwrap(); // Return the deployed contract address. 
api::return_value(uapi::ReturnFlags::empty(), &address); diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs index cf12fed27563..d2efb26e5ceb 100644 --- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs @@ -22,7 +22,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; -static BUFFER: [u8; 448] = [0u8; 448]; +static BUFFER: [u8; 512] = [0u8; 512]; #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call.rs b/substrate/frame/revive/fixtures/contracts/delegate_call.rs index 3cf74acf1321..9fd155408af3 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call.rs @@ -28,11 +28,7 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!( - address: &[u8; 20], - ref_time: u64, - proof_size: u64, - ); + input!(code_hash: &[u8; 32],); let mut key = [0u8; 32]; key[0] = 1u8; @@ -46,7 +42,7 @@ pub extern "C" fn call() { assert!(value[0] == 2u8); let input = [0u8; 0]; - api::delegate_call(uapi::CallFlags::empty(), address, ref_time, proof_size, None, &input, None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), code_hash, &input, None).unwrap(); api::get_storage(StorageFlags::empty(), &key, value).unwrap(); assert!(value[0] == 1u8); diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs deleted file mode 100644 index 0f157f5a18ac..000000000000 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs +++ /dev/null @@ -1,50 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![no_std] -#![no_main] - -use common::{input, u256_bytes}; -use uapi::{HostFn, HostFnImpl as api, StorageFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - input!( - address: &[u8; 20], - deposit_limit: u64, - ); - - let input = [0u8; 0]; - let ret = api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, Some(&u256_bytes(deposit_limit)), &input, None); - - if let Err(code) = ret { - api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); - }; - - let mut key = [0u8; 32]; - key[0] = 1u8; - - let mut value = [0u8; 32]; - - api::get_storage(StorageFlags::empty(), &key, &mut &mut value[..]).unwrap(); - assert!(value[0] == 1u8); -} diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs index a8501dad4692..20f8ec3364ee 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs @@ -28,9 +28,9 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!(address: &[u8; 20],); + input!(code_hash: &[u8; 32],); - // Delegate call into passed address. 
+ // Delegate call into passed code hash. let input = [0u8; 0]; - api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &input, None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), code_hash, &input, None).unwrap(); } diff --git a/substrate/frame/revive/fixtures/contracts/extcodesize.rs b/substrate/frame/revive/fixtures/contracts/extcodesize.rs index 3f51b69b46db..0a1171be30e9 100644 --- a/substrate/frame/revive/fixtures/contracts/extcodesize.rs +++ b/substrate/frame/revive/fixtures/contracts/extcodesize.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u64_output}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -30,7 +30,7 @@ pub extern "C" fn deploy() {} pub extern "C" fn call() { input!(address: &[u8; 20], expected: u64,); - let received = api::code_size(address); + let received = u64_output!(api::code_size, address); assert_eq!(expected, received); } diff --git a/substrate/frame/revive/fixtures/contracts/gas_limit.rs b/substrate/frame/revive/fixtures/contracts/gas_limit.rs deleted file mode 100644 index 9ce82227b64d..000000000000 --- a/substrate/frame/revive/fixtures/contracts/gas_limit.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Returns the block ref_time limit back to the caller. 
- -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - api::return_value(ReturnFlags::empty(), &api::gas_limit().to_le_bytes()); -} diff --git a/substrate/frame/revive/fixtures/contracts/gas_price.rs b/substrate/frame/revive/fixtures/contracts/gas_price.rs deleted file mode 100644 index c1c8109fafbe..000000000000 --- a/substrate/frame/revive/fixtures/contracts/gas_price.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Returns the gas price back to the caller. 
- -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - api::return_value(ReturnFlags::empty(), &api::gas_price().to_le_bytes()); -} diff --git a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs index 3d7702c6537a..54c7c7f3d5e2 100644 --- a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs +++ b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs @@ -30,7 +30,6 @@ const ALICE_FALLBACK: [u8; 20] = [1u8; 20]; fn load_input(delegate_call: bool) { input!( action: u32, - address: &[u8; 20], code_hash: &[u8; 32], ); @@ -52,7 +51,7 @@ fn load_input(delegate_call: bool) { } if delegate_call { - api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &[], None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), code_hash, &[], None).unwrap(); } } diff --git a/substrate/frame/revive/fixtures/contracts/ref_time_left.rs b/substrate/frame/revive/fixtures/contracts/ref_time_left.rs deleted file mode 100644 index aa892a8ba440..000000000000 --- a/substrate/frame/revive/fixtures/contracts/ref_time_left.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#![no_std] -#![no_main] - -extern crate common; -use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() { - assert!(api::ref_time_left() > api::ref_time_left()); -} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - api::return_value(ReturnFlags::empty(), &api::ref_time_left().to_le_bytes()); -} diff --git a/substrate/frame/revive/fixtures/contracts/return_data_api.rs b/substrate/frame/revive/fixtures/contracts/return_data_api.rs index 1d483373cffd..2a390296a419 100644 --- a/substrate/frame/revive/fixtures/contracts/return_data_api.rs +++ b/substrate/frame/revive/fixtures/contracts/return_data_api.rs @@ -75,7 +75,9 @@ fn recursion_guard() -> [u8; 20] { /// Assert [api::return_data_size] to match the `expected` value. fn assert_return_data_size_of(expected: u64) { - assert_eq!(api::return_data_size(), expected); + let mut return_data_size = [0xff; 32]; + api::return_data_size(&mut return_data_size); + assert_eq!(return_data_size, u256_bytes(expected)); } /// Assert the return data to be reset after a balance transfer. 
diff --git a/substrate/frame/revive/fixtures/contracts/rpc_demo.rs b/substrate/frame/revive/fixtures/contracts/rpc_demo.rs index 4c61f2ea82ec..0d75c6eb8df6 100644 --- a/substrate/frame/revive/fixtures/contracts/rpc_demo.rs +++ b/substrate/frame/revive/fixtures/contracts/rpc_demo.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::{input, u64_output}; +use common::input; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -31,12 +31,6 @@ pub extern "C" fn deploy() { #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - // Not payable - let value = u64_output!(api::value_transferred,); - if value > 0 { - panic!(); - } - input!(128, data: [u8],); api::deposit_event(&[], data); } diff --git a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs index 7292c6fd10ae..75995d7bb8a2 100644 --- a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs +++ b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs @@ -29,7 +29,7 @@ pub extern "C" fn deploy() {} #[polkavm_derive::polkavm_export] pub extern "C" fn call() { input!(addr: &[u8; 32],); - api::set_code_hash(addr); + api::set_code_hash(addr).unwrap(); // we return 1 after setting new code_hash // next `call` will NOT return this value, because contract code has been changed diff --git a/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs b/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs deleted file mode 100644 index 93ea86754f55..000000000000 --- a/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs +++ /dev/null @@ -1,44 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#![no_std] -#![no_main] - -extern crate common; - -#[polkavm_derive::polkavm_import] -extern "C" { - pub fn __this_syscall_does_not_exist__(); -} - -// Export that is never called. We can put code here that should be in the binary -// but is never supposed to be run. -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call_never() { - // make sure it is not optimized away - unsafe { - __this_syscall_does_not_exist__(); - } -} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/unstable_interface.rs b/substrate/frame/revive/fixtures/contracts/unstable_interface.rs deleted file mode 100644 index d73ae041dc06..000000000000 --- a/substrate/frame/revive/fixtures/contracts/unstable_interface.rs +++ /dev/null @@ -1,44 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -#![no_std] -#![no_main] - -extern crate common; - -#[polkavm_derive::polkavm_import] -extern "C" { - pub fn set_code_hash(); -} - -// Export that is never called. We can put code here that should be in the binary -// but is never supposed to be run. -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call_never() { - // make sure it is not optimized away - unsafe { - set_code_hash(); - } -} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json new file mode 100644 index 000000000000..bbd54cdefbac --- /dev/null +++ b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json @@ -0,0 +1,26 @@ +{ + "arch": "riscv32", + "cpu": "generic-rv32", + "crt-objects-fallback": "false", + "data-layout": "e-m:e-p:32:32-i64:64-n32-S32", + "eh-frame-header": false, + "emit-debug-gdb-scripts": false, + "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov", + "linker": "rust-lld", + "linker-flavor": "ld.lld", + "llvm-abiname": "ilp32e", + "llvm-target": "riscv32", + "max-atomic-width": 32, + "panic-strategy": "abort", + "relocation-model": "pie", + "target-pointer-width": "32", + "singlethread": true, + "pre-link-args": { + "ld": [ + "--emit-relocs", + "--unique", + "--relocatable" + ] + }, + "env": "polkavm" +} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index 38171edf1152..cc84daec9b59 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,14 +19,12 @@ extern crate alloc; -// generated file that tells us where to find the fixtures 
-include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); - /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { - let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); + log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; let code_hash = sp_io::hashing::keccak_256(&binary); Ok((binary, sp_core::H256(code_hash))) @@ -38,6 +36,12 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; + + macro_rules! fixture { + ($name: literal) => { + include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) + }; + } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -57,7 +61,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/mock-network/Cargo.toml b/substrate/frame/revive/mock-network/Cargo.toml index 1ebeb2c95db7..c5b18b3fa290 100644 --- a/substrate/frame/revive/mock-network/Cargo.toml +++ b/substrate/frame/revive/mock-network/Cargo.toml @@ -18,17 +18,22 @@ frame-support = { workspace = true } frame-system = { workspace = true } pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } pallet-revive = { workspace = true, default-features = true } pallet-revive-uapi = { 
workspace = true } +pallet-revive-proc-macro = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } +sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true } @@ -38,8 +43,8 @@ xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } -pallet-revive-fixtures = { workspace = true } pretty_assertions = { workspace = true } +pallet-revive-fixtures = { workspace = true } [features] default = ["std"] @@ -48,13 +53,17 @@ std = [ "frame-support/std", "frame-system/std", "pallet-balances/std", + "pallet-proxy/std", "pallet-revive-fixtures/std", "pallet-revive/std", "pallet-timestamp/std", + "pallet-utility/std", "pallet-xcm/std", "scale-info/std", + "sp-api/std", "sp-core/std", "sp-io/std", + "sp-keystore/std", "sp-runtime/std", "xcm-executor/std", "xcm/std", @@ -65,8 +74,10 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-revive/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", 
"polkadot-primitives/runtime-benchmarks", @@ -74,7 +85,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", @@ -82,8 +92,10 @@ try-runtime = [ "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-message-queue/try-runtime", + "pallet-proxy/try-runtime", "pallet-revive/try-runtime", "pallet-timestamp/try-runtime", + "pallet-utility/try-runtime", "pallet-xcm/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/substrate/frame/revive/mock-network/src/tests.rs b/substrate/frame/revive/mock-network/src/tests.rs index 34f797c2b530..bd05726a1a45 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::traits::{fungibles::Mutate, Currency}; use frame_system::RawOrigin; use pallet_revive::{ test_utils::{self, builder::*}, - Code, DepositLimit, + Code, }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; @@ -52,7 +52,7 @@ fn instantiate_test_contract(name: &str) -> Contract { RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .storage_deposit_limit(DepositLimit::Balance(1_000_000_000_000)) + .storage_deposit_limit(1_000_000_000_000) .build_and_unwrap_contract() }); diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index b6ea1a06d94e..012b4bfab9a9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -25,17 +25,6 @@ use proc_macro2::{Literal, Span, TokenStream as TokenStream2}; use quote::{quote, ToTokens}; use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, FnArg, Ident}; -#[proc_macro_attribute] -pub fn unstable_hostfn(_attr: TokenStream, item: TokenStream) -> TokenStream { - let input = 
syn::parse_macro_input!(item as syn::Item); - let expanded = quote! { - #[cfg(feature = "unstable-hostfn")] - #[cfg_attr(docsrs, doc(cfg(feature = "unstable-hostfn")))] - #input - }; - expanded.into() -} - /// Defines a host functions set that can be imported by contract wasm code. /// /// **NB**: Be advised that all functions defined by this macro @@ -90,7 +79,6 @@ pub fn unstable_hostfn(_attr: TokenStream, item: TokenStream) -> TokenStream { /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. -/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -130,7 +118,7 @@ struct EnvDef { /// Parsed host function definition. struct HostFn { item: syn::ItemFn, - is_stable: bool, + api_version: Option, name: String, returns: HostFnReturn, cfg: Option, @@ -139,7 +127,6 @@ struct HostFn { enum HostFnReturn { Unit, U32, - U64, ReturnCode, } @@ -147,7 +134,8 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - _ => quote! { |ret_val| Some(ret_val.into()) }, + Self::U32 => quote! { |ret_val| Some(ret_val) }, + Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, } } @@ -155,7 +143,6 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, - Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! 
{ -> ReturnErrorCode }, } } @@ -194,21 +181,22 @@ impl HostFn { }; // process attributes - let msg = "Only #[stable], #[cfg] and #[mutating] attributes are allowed."; + let msg = "Only #[api_version()], #[cfg] and #[mutating] attributes are allowed."; let span = item.span(); let mut attrs = item.attrs.clone(); attrs.retain(|a| !a.path().is_ident("doc")); - let mut is_stable = false; + let mut api_version = None; let mut mutating = false; let mut cfg = None; while let Some(attr) = attrs.pop() { let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { - "stable" => { - if is_stable { - return Err(err(span, "#[stable] can only be specified once")) + "api_version" => { + if api_version.is_some() { + return Err(err(span, "#[api_version] can only be specified once")) } - is_stable = true; + api_version = + Some(attr.parse_args::().and_then(|lit| lit.base10_parse())?); }, "mutating" => { if mutating { @@ -255,8 +243,7 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result, - - Result"#; + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -318,12 +305,11 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), - "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; - Ok(Self { item, is_stable, name, returns, cfg }) + Ok(Self { item, api_version, name, returns, cfg }) }, _ => Err(err(span, &msg)), } @@ -353,61 +339,48 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: usize = 6; - - // all of them take one register but we truncate them before passing into the function - // it is important to not allow any type which has illegal bit patterns like 'bool' - if !param_types.clone().all(|ty| { + const ALLOWED_REGISTERS: u32 = 6; + let mut 
registers_used = 0; + let mut bindings = vec![]; + for (idx, (name, ty)) in param_names.clone().zip(param_types.clone()).enumerate() { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") - }) { - panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); - } - - // too many arguments: pass as pointer to a struct in memory - if param_names.clone().count() > ALLOWED_REGISTERS { - let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { + let size = if ident == "i8" || + ident == "i16" || + ident == "i32" || + ident == "u8" || + ident == "u16" || + ident == "u32" + { + 1 + } else if ident == "i64" || ident == "u64" { + 2 + } else { + panic!("Pass by value only supports primitives"); + }; + registers_used += size; + if registers_used > ALLOWED_REGISTERS { + return quote! { + let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; + } + } + let this_reg = quote::format_ident!("__a{}__", idx); + let next_reg = quote::format_ident!("__a{}__", idx + 1); + let binding = if size == 1 { quote! { - #name: #ty, + let #name = #this_reg as #ty; } - }); - return quote! { - #[derive(Default)] - #[repr(C)] - struct Args { - #(#fields)* + } else { + quote! { + let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); } - let Args { #(#param_names,)* } = { - let len = ::core::mem::size_of::(); - let mut args = Args::default(); - let ptr = &mut args as *mut Args as *mut u8; - // Safety - // 1. The struct is initialized at all times. - // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. - // 3. The reference doesn't outlive the args field. - // 4. There is only the single reference to the args field. - // 5. The length of the generated slice is the same as the struct. 
- let reference = unsafe { - ::core::slice::from_raw_parts_mut(ptr, len) - }; - memory.read_into_buf(__a0__ as _, reference)?; - args - }; - } + }; + bindings.push(binding); } - - // otherwise: one argument per register - let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { - let reg = quote::format_ident!("__a{}__", idx); - quote! { - let #name = #reg as #ty; - } - }); quote! { #( #bindings )* } @@ -421,24 +394,20 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { let impls = expand_functions(def); let bench_impls = expand_bench_functions(def); let docs = expand_func_doc(def); - let stable_syscalls = expand_func_list(def, false); - let all_syscalls = expand_func_list(def, true); + let highest_api_version = + def.host_funcs.iter().filter_map(|f| f.api_version).max().unwrap_or_default(); quote! { - pub fn list_syscalls(include_unstable: bool) -> &'static [&'static [u8]] { - if include_unstable { - #all_syscalls - } else { - #stable_syscalls - } - } + #[cfg(test)] + pub const HIGHEST_API_VERSION: u16 = #highest_api_version; impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { fn handle_ecall( &mut self, memory: &mut M, __syscall_symbol__: &[u8], - ) -> Result, TrapReason> + __available_api_version__: ApiVersion, + ) -> Result, TrapReason> { #impls } @@ -488,6 +457,10 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { let body = &f.item.block; let map_output = f.returns.map_output(); let output = &f.item.sig.output; + let api_version = match f.api_version { + Some(version) => quote! { Some(#version) }, + None => quote! { None }, + }; // wrapped host function body call with host function traces // see https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/contracts#host-function-tracing @@ -523,7 +496,7 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { quote! { #cfg - #syscall_symbol => { + #syscall_symbol if __is_available__(#api_version) => { // closure is needed so that "?" 
can infere the correct type (|| #output { #arg_decoder @@ -544,6 +517,18 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { // This is the overhead to call an empty syscall that always needs to be charged. self.charge_gas(crate::wasm::RuntimeCosts::HostFn).map_err(TrapReason::from)?; + // Not all APIs are available depending on configuration or when the code was deployed. + // This closure will be used by syscall specific code to perform this check. + let __is_available__ = |syscall_version: Option| { + match __available_api_version__ { + ApiVersion::UnsafeNewest => true, + ApiVersion::Versioned(max_available_version) => + syscall_version + .map(|required_version| max_available_version >= required_version) + .unwrap_or(false), + } + }; + // They will be mapped to variable names by the syscall specific code. let (__a0__, __a1__, __a2__, __a3__, __a4__, __a5__) = memory.read_input_regs(); @@ -605,8 +590,10 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 { }); quote! { #( #docs )* } }; - let availability = if func.is_stable { - let info = "\n# Stable API\nThis API is stable and will never change."; + let availability = if let Some(version) = func.api_version { + let info = format!( + "\n# Required API version\nThis API was added in version **{version}**.", + ); quote! { #[doc = #info] } } else { let info = @@ -628,20 +615,3 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 { #( #docs )* } } - -fn expand_func_list(def: &EnvDef, include_unstable: bool) -> TokenStream2 { - let docs = def.host_funcs.iter().filter(|f| include_unstable || f.is_stable).map(|f| { - let name = Literal::byte_string(f.name.as_bytes()); - quote! { - #name.as_slice() - } - }); - let len = docs.clone().count(); - - quote! 
{ - { - static FUNCS: [&[u8]; #len] = [#(#docs),*]; - FUNCS.as_slice() - } - } -} diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index cfaaa102fc3d..8bf930240240 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -38,37 +38,44 @@ path = "examples/rust/remark-extrinsic.rs" required-features = ["example"] [dependencies] -anyhow = { workspace = true } clap = { workspace = true, features = ["derive"] } -codec = { workspace = true, features = ["derive"] } -ethabi = { version = "18.0.0" } +anyhow = { workspace = true } futures = { workspace = true, features = ["thread-pool"] } -hex = { workspace = true } jsonrpsee = { workspace = true, features = ["full"] } -log = { workspace = true } +serde_json = { workspace = true } +thiserror = { workspace = true } +sp-crypto-hashing = { workspace = true } +subxt = { workspace = true, default-features = true, features = [ + "unstable-reconnecting-rpc-client", +] } +tokio = { workspace = true, features = ["full"] } +codec = { workspace = true, features = ["derive"] } +log.workspace = true pallet-revive = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } -rlp = { workspace = true, optional = true } -sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true } -sp-weights = { workspace = true, default-features = true } -subxt = { workspace = true, default-features = true, features = 
["reconnecting-rpc-client"] } +prometheus-endpoint = { workspace = true, default-features = true } + +rlp = { workspace = true, optional = true } subxt-signer = { workspace = true, optional = true, features = [ "unstable-eth", ] } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["full"] } +hex = { workspace = true, optional = true } +hex-literal = { workspace = true, optional = true } +scale-info = { workspace = true } +secp256k1 = { workspace = true, optional = true, features = ["recovery"] } +env_logger = { workspace = true } [features] -example = ["rlp", "subxt-signer"] +example = ["hex", "hex-literal", "rlp", "secp256k1", "subxt-signer"] [dev-dependencies] -env_logger = { workspace = true } -pallet-revive-fixtures = { workspace = true, default-features = true } -static_init = { workspace = true } +hex-literal = { workspace = true } +pallet-revive-fixtures = { workspace = true } substrate-cli-test-utils = { workspace = true } subxt-signer = { workspace = true, features = ["unstable-eth"] } diff --git a/substrate/frame/revive/rpc/examples/README.md b/substrate/frame/revive/rpc/examples/README.md index b9a2756b381d..bf30426648ba 100644 --- a/substrate/frame/revive/rpc/examples/README.md +++ b/substrate/frame/revive/rpc/examples/README.md @@ -34,7 +34,7 @@ zombienet spawn --provider native westend_local_network.toml This command starts the Ethereum JSON-RPC server, which runs on `localhost:8545` by default: ```bash -RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc -- --dev +RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc --features dev ``` ## Rust examples @@ -65,6 +65,34 @@ bun src/script.ts ### Configure MetaMask -See the doc [here](https://contracts.polkadot.io/work-with-a-local-node#metemask-configuration) for more -information on how to configure MetaMask. +You can use the following instructions to setup [MetaMask] with the local chain. 
+> **Note**: When you interact with MetaMask and restart the chain, you need to clear the activity tab (Settings > +Advanced > Clear activity tab data), and in some cases lock/unlock MetaMask to reset the nonce. +See [this guide][reset-account] for more info on how to reset the account activity. + +#### Add a new network + +To interact with the local chain, add a new network in [MetaMask]. +See [this guide][add-network] for more info on how to add a custom network. + +Make sure the node and the RPC server are started, and use the following settings to configure the network +(MetaMask > Networks > Add a network manually): + +- Network name: KitchenSink +- RPC URL: +- Chain ID: 420420420 +- Currency Symbol: `DEV` + +#### Import Dev account + +You will need to import the following account, endowed with some balance at genesis, to interact with the chain. +See [this guide][import-account] for more info on how to import an account. + +- Account: `0xf24FF3a9CF04c71Dbc94D0b566f7A27B94566cac` +- Private Key: `5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133` + +[MetaMask]: https://metamask.io +[add-network]: https://support.metamask.io/networks-and-sidechains/managing-networks/how-to-add-a-custom-network-rpc/#adding-a-network-manually +[import-account]: https://support.metamask.io/managing-my-wallet/accounts-and-addresses/how-to-import-an-account/ +[reset-account]: https://support.metamask.io/managing-my-wallet/resetting-deleting-and-restoring/how-to-clear-your-account-activity-reset-account diff --git a/substrate/frame/revive/rpc/examples/bun.lockb b/substrate/frame/revive/rpc/examples/bun.lockb deleted file mode 100755 index 3a7a0df5cea48f9e639655bd11d1b9b94751e6b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10962 zcmeHN2{@G7|9@j5ZhKNhq?Va$wKEG5ytZ&%&q-$J`|uars( z-KcQ2(4s|~cBDnxyT9*uW)5%NenaskSD2nngn=&*Z^XI|~E8^}RJ@_Je za@yJ|zFBz4>^5 z-i>ps?bZo94L-ZNp7G)CdeX^4d#mR*?t?2W9Yk&0dkjl7tsTB4XWnpwgkK-rNz58f 
za93PhEIUY6AE?f`Ie(XF#hTIkFNRlLHL{B@i#zPO=%ICf^g7AGW_QDe2go{1p2B;U zI@9Ej;}eRn)p(nY&&iI~4_(wf{tR{JM2oJA^_$nel()3Gct0bTJKcVG!86+g6 zrQr^qyE_M5&%b`_!`S$>?8z%F)B8K#{{-zF0Z;rvcpIW2?OXxK1w7gfL#R%##^M!# zcTwvPLz*IbG!~x@fIezGnZ*rZ@jlVN0`SAs_P2t1n8S@q1kZ&F^dHJb{TrJIUIZ6| z)$+0Kd*!DA$QAG~*Olpe{bvSBWB*~_!4lDE8;Jhn0Pg~L#A5nh`9A^Pie`Uf`;f>l z1N-+z^%;%)I}`9Vp;WwdYR2nHPg1u( zuY9?sXvVZzvwOtfXggJyu({d1ysUPY%X~^k?XncgH^sIn@Sj`QDx+PlsYU7L^wpsP z`TY!@TXXB2Y#J|_N2oS~8k5xh;oBk0ZO@hl&bjP@Nl&fCaSk|5N z)0~ot$;^X~*zJ-RcllxYRI^J~rXLL#+|#P%oa#a2#l9wqv9k5j4ND3(Z@#X$G0w5? z(BuUor-8A@FW)p;cCcT{75-fPsKlQC6X3A&t$DW=xhwYl^4z&oH{+VZM;(8hv{AV9 z=T0U$H5!A~o__JFSW}D}vGv zSBx|?47(q?Cw21v7logBGsp7NI(FaX$hH|9ZTjZz>)SeQ=X83Nnz4gxN9V9IBhO|oBCdl+g`7Y;Z}L=yHz&% z#^vS(rTWK{%pTC*JYwSpu@%(SUP4&3d^@fo$)sl@MN$)G6b^~LWh3Yp=R<2P9+ z&uHp?BUQxiKl$~YYd`dK$>UaiK0bg zQueZ@TO{VcD$XqQTBLB*ow#gTMfDc1h$%E)`tydtJdS%x*qcD<`Lx$M2{**<@n-lzo+Z5-(TNcyS#j ziIK72WbXaZtbc8tZR!`=zA$}KDMXb?tx%@}!#afFlmIqDx zZP;a@tzTwx_SK~cOvm5n52W#u^&Ghwi4%4Vn4o`X^}G@N&PJl4;QXmx--d$o!ZJ=-YjmQSFELk7dQm ztTJQCRmq!teOBf(x2dtOJcNS>EvW8&dc4lgg7aw%%G|=_=S?>g&Qy;c+&hH!VeDrl&-J<;L4-Rd6 zd2Pk=j+YMTw+ha&r}5%mge1oHPxsb1g-qM#P-7IIu_@o*R{Xp~+o;Fqr_MhM?gW^% z>g+c$eedn)$8(pjoN~d(^i1DfYnuh6hq@1YZMc;0v28Pr*MN{h&X5&7opj=G$|aGm zo3WP#H=|wAkZhw1-n&~bFDXBI&qQxk7uOq%#}3n9&)#t^^nKdy8o6M#@h>gXONY&R z6Q#9KM&l*>5u|4%57_!#JI5`wf-^KYXlJQde8a(O{hbf_1IJDJXytx`1 zYC?=hKzz&j>y3ZAVqBbJ^qc?tABUu#Sq;36O}Nj(cb-8qRnEeTMkXaz{a&;$$b@HJ zOe3Lxdo#K&ZDErK&z*1Tg6+sVSQgt)DEJ^9{AS-|9as6 z(gPCpo2!lLT;EA1^`K(%ueP@+X`c4XttuwG`;KlVHpRdS;`x!h_;`tBHYIv@~cO>{e z1K&O1djdSuqgJ@b#`6)LZE;_Wdsy6W;@JkzA$VrM{XOc2`$asn&s6Jmvyu9tZiq)2 zs3Xcn9@GJKL7h-H)E)K2{Ugdn{m~Az0d+y0P*=1EbwYhmU$g;r$NQ)|+TYp?ejsUC zsB(R$DYkv4DeR$t`qcT+=Fj~DvSLV_RtuF?rs41l+Q*QHr&g2cwzez>mNn%KcR<~m^?<6*jY}PCe zhoWlKx{+wLR+DJZjActvB<@Y(!b;t2k&Q&sNu*gRM~NXZc@ndRJAIUH`<8BfS#Ym@ z??~d?T20h$tBc%|$TqN9t1u+CPh#IntvSd>qW>g1UavLzK7ho}0Rz^c-^dpMBw`L2 z=mKOT-vE#pxspwJhkPwSqUp*zN;dLc0g1b-F*Y_7`O<(y;+0xsXH$Q+2gVME#ic0n 
zH3Es&Yc=7*pt1qo$ae@NP7iE-ST+tY;6|ut2g$!&*i@s0dhbrUE;&E(h%h6yBCvtX{-d~>R~NO)3#Jd!QthY7;?Gv#uL%#jVHK=>9z8p#q# zLV{#0u{4nVWkWVojbP%Cv}4HwXSv8DC43oMB=+Y^cz(gWKt3}_CKhR+#Pb#qVFolf z9g)XR91_A4`7`+upn^LZLXEDdqbMqCNAq9S?qv{e39I6Irq4IFZv}5&!Q2=NS7`j=2q!+;g zSTGvEt5+a^s;`uuwQd0QwQ~XfbrsldV+YHuX7%g33mEFInf9P~Ln?gP?e%c=H~{gu z$ZM8PA9uh&FEUk60wf@Vo5@qiXENpDV7{oXN+A{2cP$)=HT2Ziy^9N@a(HRL(B*)F z^deKadHIy$87fXX#emo|A?x$+T z^qa{OXj(w&%|Ix<2o8lm<#?1x#d0xI2&<_E3+fvIQGG$#i`)i0`%M$j311ooTx!1t zO2O~|g^(GbX^7HV0U52tR6R@^rPZGuUgs!!FDTvB&GM&`gOz+G&-}(P@ z{(H|o_x|^ubLP1Cv{87~7}I>t@!8cU`yAU!4fWlpB<4Q4VF~?sZe?M|-UmgKbVa{d zkjJE!QfCY6bw1hkz*v!e{sJyjk`Qj-QV+(Qvw>RRR2egX0i+c}YINOP8ZV$1EJ_PX z7ZevUW_Qe9R9sTTzECq3i1$sv0N`F3Uzc%(jKx5IJ_1k&94=#=j6pK0Wo%bTb~Vl` zF2Q+5#zVl~=-?e0UzKsWjProKpwE+Ww2VV#jFPc@K}pG?GRCUB88g6s7BCbzLVk_` z2C*9E>;a(<6n{s6`U0DQ;lMpW{(cQG3Rnb;1darT0i$Kq0J&XTPuKfTfqb9_U&r`pVCvzw}kPK>6P3;(meZ0oMfz_DUBsM5^zdF1{twCBOxdf_3R)L^sOUrK=0E zHPUEnuT*h=H@buux9}p7wg#oCu$U9b;Ge2ag=g84w6;sS&?SX=DB5|D#?jWmG{J)| z1!jN;>oWum6k^1y&!%O-tM0V$OX zo~fS4Af;iY)l}d!n8H0&HCP_T39;roj)@@5PijAg*fM;lN{u784RSq7$os_3mfF8V zoD*ya!#~9v1gZW1(KY^aIl%vxk+S2@jq>>adq=V(9R8ant@-{M|3qslx$=xlS@T?> z=OaAh0RO=b@&A5DKgpU(^MA&@5f0-Y-8^)qe_3Wn+U26JTMw-|T>aj+Szgv7Cz?ix z@hba)QFmuFg zwC*Yie4)=#^TYG+PU*8Za%4#38xJSZuoyjEL$5g5yP7KTcHg6$#?^l4+Zyw9{oTJb z{39VJd1l7Z(`PrFQx%A(JPY?e+P?Soul{WC{k*kL#q|%YO|z=(J69i1ZaZLEIM32X zx6$iu(|8>B$z~hv9;hY%*ztlV1tr@AFIouRgU%$|NE552xRmjNmX@X1=rEqH;K_$# zQf-1StpxX@i{QP;lx7op(|T|nT?O~2H;F0tJcocb#unB#s0K6X^0gooXESu1uiopla z3Gf&Svf6}!v=Dp{odJ)fh>Ix$WEyP~hR}NO1iA`7 zl#<8T1Pj%HCsNxO8x2U+QqI`%LNaX`n@fc`2XT9=ycQQ%*!jXFh;%1+cEEP5KJF(c zKjM{??P@ouUTAbqtQPnul=d@?x&UW_54Gn`ZY;}D3AAIfj>@jm$^;DujR0kVtRQ~Fa3_#k@qCMggCanYpePV>HP#Ol z4eAdXfSkAYT>pVa$b&)gAQQ+8;BSB!yQ1YPQxT3t=ui{GCwG}`~1>G;==e3 zpaSvVi$_-u#Z#{1!{OfNskL;$QhP~Q%~Ku<<=W&HJc{a>5UWUAZA+wNz$**-DSRP;==mZ{8e(Ler@>%oQ^C<9W z#DQW#gFpj8F(5>Xz0a2cts#rPoNp#;X>6#yVvfDsfpK_pnmGNID9p4NO+$<(EE9cJ zt`C_CdrZ=)1j~6$8>MeP0J5E 
z*9i--K!}pF7Kn}RTyc1j<8)H1StrbdcP6;^E0J_d%oe4}ajYNHd2mzKW8N%EMb%66p-O#Gd?ammLihRKsDyalHG)4{Mei@sTa<$2#I?HJ zy#q%-bQ>roizWM$LpxtPSn1X*S5YTERcf5y>{&7Qj;7yvw}DaxtyArrGs`b!satbM zMcGUBp-S0w+GFCcM_r$P(rs`>Mb4#qi(F_qPqI6squazxw}DcMU0(IlDASvlPPjEe zYC4BcmD+7~K!>MKnC^kwK&kF1^;Xo}jrHr?nyG3EU8WCJ%E4_R@kcMT73RAQR;X#> zGQCBqAGLwjwrvL%H@gj#3iG{D#WTt?`oHbgG^uGD`c_KPmYu$aQv=#^-3GgQ()nfP zP^I!ZrVHpbW7C#em5^XGV}CLBX-^8T)LWENZBqT6JDEFLI^70JG1tA~d{N1{d!kzt zt))_Ys#JeTcdK%}eb=_Q4V3!umz$ov{rf1t8*a@SE&ZWVAF33T-wf}kxu0b{ZIJI;2vHC5iUp-S&19k%#iGp#uKGx?FnsH{3Z_;)ya_|No8($G(YrQV%oR<1%7 zlSfCZqrEDYmDwGI<;7(c)V@4~uGaeF7p-`?l3Hp4D0_J@J+8Hqvt|h8ufp9>A4wZl eP0%Q(04>}UPC2hErf$yo#_!husTURGZ2lLaP(W<} literal 40649 zcmeHw2|QF^`1fEeDNE8$q(W)zdlDf_C0mQKG#DA%Xl87slBkr5N~;#FqFwu{RY*mW zls1(LsYJBC&$%mBy&S7i$FhjK% zp%KbH;h~a1v|yHRkUuk+LGx#^L+D&ugtnwGg+f_>g88H3@rZGh3G$=fpGAU#2dLg@~%8~>i}!{kPXF(^z9E0P0g#6J+TqTnA0AE4TOApZ-95iW-n?2FR5 zAz>(@s8cDFZYZ4*62V|oD77NIasoLtP5@nxQUUj}z%PLq<+~2CG{g*U0D}z$;_|qh zV5Se?(HR_CAfyizsL5$K5K#a;+4e>yT6Cg(V1>F+*4P!I7Tqd$e z-u+}K4B21X{@Tb-Pbrx$YZtOL`^Tnqkqh;GF71j=T-WbdqlDq*iwY0Lzr-Cl?_E33 zX4~%huOHYs=SK9oZ#rV+{F~+*!t*_v6f>eE-Q2T+S8GuB&b%~8*!@I4L!4?q@6<}O zg6!t><{dN516D~sGLj03(%PF?%TT4O+rG;j@rGeKgZ z#=V{FqM)N5oJ6}H$94~m@t&kKRONE%a@irHts=$iPZp)iDqdB8GbwoHj-RC|r!BL_ zF+JI1)Y7E-TEE$PFe2B*{Emi#V$Z!>2Wp?W`^Iwf$VRj{7lv zOygPRX|oJ{v5;4fA79*(rF-7QDfpqC%x=q+rR>dW6-(`xd7p_eiD zV#kO(`?Htk`MEv5n7-uG!U3xnD!9*&Xizs&J7BE)_0G6gmct|(tgNp#d2O7ppO*h~ z`Ks?}Cx`TWYy5uhBx%_tMcKosp4zVydWmt1{eo}R3p;Om_s(K_(U0RlYd%U2$RBoA zzK@*es*Q2xUc%=}>Li|Q8x}iKBZ1-b$V=PhsK)6P`C`M*kJn!KX>QG|!liQ6DsB5~ zXTgT(ZGpv1SYEmmV3{ET%mEZ#7_aCa<~xcOk^1L2axY6bFG0?i8@30S@qGLHgz z)E?1}2=en_`JNz%d&EjzVYUIUr91d05`jI@aNU%RdDdBS9VsqB`Jobl+aU`NUzL zGl5@!WP7+B?a3k@mUjjDu^^9i-&y%okk{eYAL#(r$_@owely6!n2W_igYm^5Vl1x% zMr+D%KN7*|e-LlU7Y_3J{Px2ZuqC~t^7}yEf?t1VN~iVz2IP_bqVkabIvQ|0m7wG7 z`R#|TQm4mX0LUZ#p|Vl^I~s8LJ3!u=Up`9f?D&5L@&+J}#vSadD4>oG1zf%^2;%YA z6C^upzuq8^=YQ1y9Sykr6p%;rZ`*qNFt~J0Xc(<>h}pgUcdOgtV)>gOZw&IV7pI`| 
zK^+|mSY8%{EI}S%&_27vHiYG!K_0IksQ+>LpDKa#C6VND+Mi?*ALq>lc{8X#mg#K! zWx#Mz`|-Nl+5V3Kc{Ki^8pPb+(J_GQe;DK~`1(g}=xq7#K_1QjNDg7bZHEFb-xLO% zBgo_X&X&Iv=_K)@7b{_d17M5QO@_7A6d2#yh4 zpnOy=O2gv_=lflZ&f0G_$fNx?Di@XC+2nZBOB;Q!1})l@~Hht20g#wc>vwF7qEOG zoM$<9L4F`?Iwo{MepVOdv%4t&vy1ZM;i1Q&3++z@dAlyiS9DQ+7;KulQhrDm<&Sq! z{(BeYZD7*tO8ZxIQT`stkLyDJ4}?MA6?u9W<@a_`z82)IyU>0eSopdkAKpdz(_NJR z-bHyQIFB~~{h_t5UwadN8y=qjqu`~%j4sIE1$p-_$d4S*wepvMylWTAF9&(gF31}V zB;F#%{;mGEgFL+p4_bQM_`+vOu?X0|# zvf%t1?Y}$w{OJYqxcx|81*jtdw||E+@B9#!ar8bLy({lf!19ljdCy-+e=rqwdjGCC znD_jEo_`QVN81lsiDxw=hn^c@8%aEqpmv~uVx$jPo{4l=W{7}d)JLeUs0}duiF`1O zhujzxJU+Yq(j)f_^@mGE-DBdy;zq|- zr>ahhe=5}bdTNx~II9$z_TBxp^ZXoY>t6JYCE=pA4F{^@lqEJ{kK;F|7$~{UOD?op z>91`Y9Vx24No^$avxIm2e!~%aCD=QAeV?AXsN#oH(G&&G1dCx83*zH*mR1?x7)HWH zdk-9_R7=&j+aylET);58qq*(V1qEaM=DUpv+p22MZEfD_6uWcG6Yu9=8YADP)rgMk zF)GaF?VQ9^-apYd51t2KIL&qu41O5zrrO>!zc*>?m97wsW%puV@i z=Hn1=`QXK=nUi;o%+FQP?x8$hYTD-=!#>IG7Clf`YpVNm(4O)V^SwW>dTiXK`|M}* zKB1sdGtC?>91R^LWlO?E>i`Z^?W>tSgUq`(re>^2vD}>*`q5ZpfXQsps3JdOiy2mF zk1ns?p1%2trfiw@y5PP3uE+LGxM*^k>b$#l#2&R=mwk^&xbT{&B~aIXxE!PwRh)3X z{-gexrU44O;~bjxd_xOX_&hwRx2HNOxhdq%t2w?aPBM2po=!}Cp)n=oi{&> z=i0m@GQKJfWxmt=leu|DGN*!sonGs`TYi21GovjVgXUIyHqM-=<%T#$-9harf?>+J4 z`k!Y_C#IylckjNI{ch3gL-A`!xa55^wLeqng7Ip zm#@9D`emuc!cFIQu1QTfVc}M2+RbCy;gRB>WFN>kJ2|@Ml$sBSC*hKx2dEUM9cr(d z#x2*qQ8sJdJ=~_>e^Xc*HdUvN~(z2_xdSxyd+V!w={ptmh$k2uBJwInem}!V;7#g_o)A}>{0tm84Dz1 z(ub${o2+P39wv1)DeK^S^MS(#d@88wH`IMYaN_!bBwTnM&=ROJbmzO_K8KFjon1Lq zbj7iahCjZqT_-+q0SB;WL&iN;XoZ9uB{id^Hq?Wo$c+)$^ADy`D8bu zH{Djg^mN~gPn}*YHQQ3$bofg*{odVr_Swd5Y+k98H=t5EzfAJn4yCGVh2o_8!m)Bo zpvHQ7j63pdh@Q9O60^I$-|kw)n5PyuR~OJCYo=_{&YUS~`|xn6*68UCPakw2ZptY5 zApGt1)?OtJHfL+rt`VMALBhq)R|uTCu*P`6wdXg>6w?(lH>=%LQdaf4Znb9Ll9Z9| z@*_(ddkhP`yRI^J(ecHH^lC2D1uptN{lxmM+~iBqW%}P@p6Bf&;iBg>9H^$&1&wdE zHAS9~+9;ou`%SG}sZQmK_3EV$mrNe4?QhU%x#`sT!o^Pe_i1{mr03NapDl1(9BwH- zZJB3v-B!1#86;ddo@@zJn*JEOS?1X`kM7Jk?Y?8L?KqWuF~80Cm-YF!y7^PE>%|At 
zoMZAWC%pbVI$)Uh{hH~AD>v?{y(BcP_x`M!jP!eTBwYMFiNL83r_i>f&aC{FpFC7% z<>AUFIpK#A@0>Rrzav#l`m~Rz3rPVlw~R@Pn`{I z8EZ+ngoR*R?*=$eqL!lEjKezg_50!tLLp1*mWA@yo;ShW{`hB|P}K>DY(BIZGvy3QWrTyGmsE ziN%~1@l7$DTGp`n!bq|0BkeJyTYOZ)jrPHoD`J!kyta-~P#7 z+&%1!?apG|W$L#c7EKn?t1nX()t+{5$j!h^ljBdLP8Hn_uXaJdWx)F=^t_J)^?dEJ zF?)ZiM%<=)WCYXS%{hF2k@K_x3$A^fH{RV$>gr&<$Q?@i%;YVvOpP7yzfpQo>VV6o z1B&hPL^tL%_Z!|76ejC(fasV;chP=7Da(b8_c z~>vFe>InLv!CbpJBG`v{2xy_szRjdZKV35a}FG+4eoCa?|fhLackjB&pC6|H$If}2;RI!BJSFSUiT`z?>g>L4d!lI zD3$NGpg`tyud9kGWm}G~n$6_CI8C?dp`CM)gsY4}A*P-_F#hT5zIJ_s&pJ@? z@KfJa1Kv(xr9U1{vmfVsXJu4!H;a2e4;MA63|7rn7`1MXiil)|!j1ZpsWj7@i^h_0 z2a|Err|udvRr1)XxCx_j-50ur6$ER1*S|cIx?seznEMvvSeo}mCMT#5@IGu7nV_jM zS>k1#@zM#>i*G#`9q(T_$?Y`>SA~q*oL@VB?U3FB5+vhQ&&>+Gln^f#HM(K*t3Ds* z&mQa7e_^uuJA{l|5OFgxwGOoF}Py_MMH#!us*S)>iR#8rP?Y z?M~R5Vi2}RHdsSl^l_j3Cb}h;i{8y@161ZloZ!o7pLzw zea7&w?;aRmrJpiu(4DtWZi95fn}QN)(^plw-Z!?~7*fY(c~Ye2+>2pgd z`iuezcQ_fBQ~1{X{E7V~q2e|J+*I^W_bMEBvHzXPdu5m3tvvNyq1(#AmJg|~O}rZB z?z%Pm%hA1`xXe`p4oV#_v!C-hMmT{(!W}`z%^b7sy-1GD>}ox`pupW_EY?F4x74R! z^TxWme>3ncl6<9KnE9Z3$1U4~lk;S&j?SN9pQ4mrxBQLCom<_`c&6Hra5c!d7m6=_ zUcBr2)td>mV`iUxV_U6jUaiNj8N6>=URG$_sXI^Q1HRM;_7vr;dU2U~G4QKuk(us| zt-E|iT&Q0o-mUQ*33ntJmwx5WhXX?{C5#R+vX@S+)YZInu)jtrqsHraw+p68<$fY@ z);_myTw@BY4Bzn~Zj{?RiT?5XFOG^7`<|p4yXVMA5-yF5`$-}F=+%grhG2sYO?CNe zosVwYaNhi6vei`e;>C|P`kh)~~IjL}D=!!~H zVW;m5iJh}&XnkH*r#k+MmgB-L)uj63b7O=}9i^)k%w8tF;Y^XqwZ_LK&Q78wgH+jj z^&gr~Qu*@g?zN+=nw1>$ll=!?J?$}ZlJ|g56J7R~YsL@iQ+9WJlzG`|5-xnp)Dox} z86#cAa~YoNy7}a6acxlD_omkl)whaecdN=`ua+M!88x*oPISKGq$&H0rIea#4u%{F zO!%?ht$5^y#}X<%mC5@DofcH6uaRc*EMMQVAzD}5QWR?|8Vy3<3eSI5`$IcF@a&nu zUMY9q1od*5I_T@OwtlIz$? 
z?X?*>QJmQRCcPvFA5g7t=;@|9x8~IX%OPHBTL-$%sFtTZlrA_Nv*=NrebMT`O)28a zX&1y2`MBteK#z=DH9%zEf^AwS%Y{tJ;~1-U9|^gvR8)6-k9K~hg5Q}Jw7PSO{RW*9 zZBm@JaZ6L>j0vkYetA66@IIq(dz#+5zE7;+kOc1^^jqO(evmwP{d?r4r}llbij=pd z#AUwjb56&X`TmxpoTBzXiTfKe&38o~@^YOwqG4v^AlVi3=&#mmtCqd3GxNQ$m`2hA zyaR6uROj%d5puUKxooauNw}2Ac=s)Ht==E0TI+K3;>Tz4?}zrdaLRFS_xc_RLy9C^ zw6bM6XKx&iUpVlZQ~h%hs{!5?BwX~K0SD?tnTQR&))jjeMCBf(dF{Ea^**(`qGiCP|v5Db@d9xmFG7ur*8evd~snJ83 zdyHfTXE`h99CzKV7W=br@0;IrL>F5=4A;?isws`#)No~f{p`?&)F+BZU#nR=--vU_ z$(0hDk+68-2O;%x5-xrZg5ar}q;8pAi%N(&e0h_|_1Zh(2WLy{Qu*?JjsGFJPus5U zv9m0T$@e-ar$5E|Nu|SQ_Oxrad+Vs}`{KIRPOHdm&%!LC--zFpw!BXva0h4H+EBM_ z=z)`=<`G=C6%qw{_e2_`cbam9OD={Vx9V|ZjY^;B-17NsldAP~c7gF45;r|vUZ-4( z&KD|uc|I?cs4ou}y^q0xS{6CANAA3SH+l`oE=^0jT_#+-!*a}>ceV)zTGJL^saWEx zJ*nH}PjP8Q3%BS$Q>P1kcHOwdH1GKiiR+V!#ex_0AmPG0y_P^d71EA}y6tFXLY zIK$9x;l;bGQ{Qwhy*cE-Qa$D|>V1!~aoe@}+xVCa-CP)MSIbr&*IRnhss2pamv>e@ zcXA+bsl*vSyz6WU)EDb}=E_?+t-k+0_03Olq3*OdvJWoD%FhdaoOooV^rpTg#}+=W zNIkZ3Kc()4mC%*$`!Dxrem637Q}MoY{f5%u=X_ik9K`!53{2&WU*F&)FDhLzL3?Lb zjp7ZT*EzW%wjVOOGfW02j;R@KTe42LIBSs1#p9(zWXk=<8P{B>!|DF|}#j;hyhe|4)wRFszHGZzedjgjt zp$wQ7WL$;piC3P?KKd=Z+}YAqve$c?u}3tD^bKYkZg^0eUG;h8!;iv~3ZIqUxJU1E zed~cjwTe*#jTf=3zi-Ynj2^i+dnXT97@cWYl5uZUn`A4zr6zxljtRS<7oOLx=bU7z zMLkEa6IoZjqiWMdmE@~$7TZ1bs8P6SVF!+--iYsa;e|x=p-Q2PnuFq`-*-EC z@rV5WNiSGEgwAFch6sNd{~(N7N#IfmTq_I;F*Wh%q0Op#BXYdFb7O{Ni+|Tsnr3jR zZ{&>UJ595-&K{`k=dF4{W9p#IbJQB56erPT`xTr@=&>cQO1&H1yYciZf$Z0Yj4N|um5q&*qHLIw!iRBRVyZGLowznL+_qRy z^F8LJXGjedTGtT2Z^vEVee^zQ>#aX12mkbEl%}4Txk_|Hfa)&vn>-=()(zruWZZK@ z>I&80n9Wuxy|FQ3M$Wn`AH2LugI~H)Wz{bQZGV2$mUT!v_wvC&r}?Ezq|Tdrul#X% zkIk2zP4T-AS1&lE$0l$kD6xQv-rM3pwP#%G>$Lnq?}<9QgPUZ>uJ7*6(6|y;{PP>D zW-~?MRf5BJt9NBV=Z`H;iFMx~?~=7is`!RP@qT;nBICr;pGq?cTw;1a-x9V2>hz^2 zbYku&UF$ykvbHY!(Q;MW=V42AQ|0R59(nUj!y;ot)lCXb`#+}+U;bwIhYiDr zjEhwD6Pd`Z%p5;ssDt{7^4#2qs&S_+&&^1+_RBeR(|tvZBfq|AesCb;9;wSq@|IfX zlR7X_N~LCy+O={y61so>q2XoKftt#(;x+##=3W`0S&;&3q3r zd3l9vYkgUK+*rUIPsTkizTjR}-dVlNZx1Krj4wET 
zVp+WR0Tc>Wy4~x3_tJ-6Z-xbAn6zD4?E2d-gRAFD z^7{?x*O`oaW!eR;(-hA&o<81(`g|*%rPc3VX7RgvenG8To*DfW42$-tw!5%{p249-j9Ah)ne|z+s)xu zM-MfPNqO!v|NQmz_3PypiI4g4^1?1h5l&KYuJ<8tyCk(0!km(U6%Tpm8Wf3Gz=UrK zTLN`n_E3t~lhe(bcPh5@T6@X!3S*H&NYUlcxk;AR6O%cqfy*WYJ#*SVV$8FXyOOw5 zrJ2_(8qXxuS;(keo%nP4O*MXf(Rg!fK?VJKAF3K9DQa}gwY=b}C1d1_A7j5a_7Hui zly>yIR5xS$`Kk+@d?v`ys}qMfFRJS=CHnJGm|Cid^QVXFw_C5+r6|h7CDymeWZX^K z(|4>?=g2X4n`$sz*7Zp{9`eEW)6_WaP1$R&imhBT$+tLctmduLwCTBec5Cc!dS?_b z(%k4Q`{1FJ?2PTrZwXx9`Zk4(`(U<>*_QR%b_p*xoRIC?wB-D2+sO;otEn97Gh1#_vvT9^xKe%Z>7WKdq)J_4*Gg1$Z^WV72-XDc<0Kzecx0vu3p&F=l#RZ z%ysMbsH7&NY~P8BCb_qd)=*4kmvMU-lMGlW)uR5cTEF>(j`%*RwtP8R(Dd?J-*C=f?%b@$=>9G|pCEbVeb?p{O?6W4Zen zhC-mqwaASs-ly!hZr?n9lw04c>L;I1p=#an>{aGY;PS>Xe52PAsLLuA&e$i}ds@u( zNtee-G^EYet8@eS|LVVs* zKJ>KX>4ArfzxP4l)H}QDl-Kq!PF&v2DZ8<{j@zV^7dW~iJ$G-xjz`mLjepEN`X#IH zm)G_~qxYCDFYnu9uf1;+)9v}8fFH*O2ubDe)*;@yIisbvfNPbW7kR?;+MTF+rTzv^ zw=%CkD}Txs>nHiXDLJ+&EIDb6f>xP#N_rME*>`hKozx|)@Zdf+8R<5Te(bColSRse z9(el_4>Im2$E4w7>C#W1RXfVESF*=^f4eQ_tljalGs?%tZTi-*Q*JhGt+3DW98M$a zXl#&s@d_W0?>`GTHoHqMjLI}VYDL1u-!~C@SU$m(7WFo8%T}AOyL-F4q}}rDdF)9nOk}myBg{__(nE=0&dW zj|X7(V>Ol7w9GE8FGXzMrD3d2CLW`29OM(UqImzcx$T zu6H+HG{XCG^w;TkiF=uDm5N8N9aK2x zT05|EgskI*?(2UxFL>zi;7H&G)tqxGT2bluLyla`>n+sF!jF%O=7*VN+~DOuydQZv z6i$_NTr{Owx>>8f{(G$Mlg73ATP(9RHcp?}r{U$s`{y^D95^r2L69%Q92CH-RA%~2QTrhcigOinA;E0l7tooavaYtrDQMx{*MZQ5l$YkPds7hin4 zxhTU+wdzNlmf`KnW}iV75xq#beq`JcemZvjdQTfZ$3|o8L6__PE`8=b@*A<|Kyl2( zYVXXmNypVqK52SZzWY9D{iVGD=gYr4F7D58RUOnExb;MeqV)$7E`y94dci55dR6Pw zd-3JTCKu;u&`zvcnE4=hiqcQ%%ad%xdyu(>w zp2ZCq?5urFDCG*reANb~zKHoc(a71;H8zpzJBy5~>XbC9DxEu4GShkXnjK10V&$g< z=+#~ub#u(Kh6rw!QO1$MubfgH3x{f*kKf=PE$X4QH0J2)!JnU99N*jj?VMdC+yFA} zjw@pp&U7`)5TAPOh2xv5In*MRf@^P+R2QU~tKLx>B|Yjvc}PTv;@Y65s~U3#QhSEc z*1J{LSx@>DEGZjyJ~BZ+M|t6J2yL5VHKgBNhac4txU=+d40V>nqcPnX^J!{=8pH|M=XaSoDSC_d|C`K3VuLH z4xN|cZKr>qpn<{V$5efZ$aV5zxC^jE{d0jar1#P#$=FLWCgr7s+-5Lf>Ga z5DbL!p_VKGLLn3g<%4%0l%+tZt}GywhVYjGq5NS$DD4zqeKDv22$E*udb;MIrQhfa 
z$)GY3E^05*6>1OC57LJt5c;0Y2?%{BhQ0|$I?)2s2137`K)**oza2o|`Ktk`0}Tfn z0i*#m5{L$b+K=jr+Jfqibb;EA+R6or0Ez^P0*VHj12h*X25278e4qtDu|Nxf;(!(b z#RDY(q3=}CHznx%kC{MpARi!KAU`0~hp5kH0R;ddRRsbeRUzGn03m%N{UUwN24VxD zx3}n9AoSf1`o?A=&}bm^{q`6jO`xGb=y#~&fGmM*f$V_nfyM$^0a*jt08IdL077=u z4M-YD252de1rYk3fC|tMAb2f8{Ov&W8)7-2UO>x$5`j>@41PbCpvpsNufLHFkS>r; zP}y+oPvE2SksY9ZLH&d3g!)Px2-yP_NCZe22#qd$kJ1n!d%|`7evI}b_M`(GJx%zJXKzRb``Q7H zfu;fC;5W|L3lhQ6*MyEhHUfPci~TkU3wdd4Y9o&Z*rOWu2n8HNC|^^b_znZr5Br=V zWQ;X+ja$6NVXs%f(bd$|(bPA>{@<|wDq$gQXbu`V*kc>^I3+A(tf^zH35JJ#$YCE? z!a_PwHDnFg%N+KSCE%d`0Q-)`ezXwB0D%!bi#XWh7UJj=oM7O>_=&|nxe&*Y;6(E| z*vpjwZ84vN{d%GHK#QOt=mDbVa4*OUnU97VfZqOF4)iHeh5wfGdk${Te`V(LS?G7+ zF#p72pJKv7z|ldy{ofh@hiGc2(O_WyzNK?mexL;y^@f$zw$=+S$qO06goo42)y%=h2+ctL|qpmrj##3LKUUi46(67x$tb1(KFi9O&Ec0i~U`=rD^ z^@u(t`p1`F4))Fm9I*G_*ntsmEZ{G_uqQt>ClV$~!9Fmt4?jSIv51t7y<}o9e}skT zArAJViTwZyYK=W@Vo!k}1Fb=I!@f1KZ$OX%b4O^{n~|CU4J0@O8urkM zJq+??baW^dFkzt?3Hux*YK;cgFWLiR8!e3#?AH_fB@`CY2koGB;g=So3%yFgJ}8fP*#LOD(6{8G=#7H{nYI=$FEj}j-d><%ABc%r`BXUPhVeaJKm)TvT^9-* zv^w`)H!`~7!1uQTH2jSl_SpzJKszB=nXp$>?9~xCXa$5`z2KbUB?5*r8xAwb;w1 zfP?+mVn3V$4)%15J#`8=*cUGL0*DN0uJ`Li#>)4IM^reE51)r z0S9~S#a>4R9PHN@`y~}{u;*Xwxm3WxzJalCQvnBi560e41sv>O82d*RaIgnr>_Jt) znXF7a*T-VtrKnF~PKMckBcFr4m%@r+47(?o{@c!cXf=c72Nup>tP2L3x`wcr{8t9= zmBaW?l80NKD9HzWygi(zO%zPgq4Z!^E}I!T%UoGU@UUsz^03L}_@X6&lJix5H+yZe zJaRma(qta*G<&Bw`wW)@lRS9h_PndS(8Ygz(<(UVK=X9GvB6EDDCr+h`8CklO2D8q z6L_xG`R!w^1@aEg(E^|5#Dy=L8OF5;qqD(lxE7lc!Hi%8aJgX|Q!Tig1^%_!(VC%Q zA%PrC7JHT!a!Rg6BO)6Axl)rmYmNoSH=NA?pXp(Gx}<9pi%^zNFhdKN5eyFhCV&fl5q35~;=n1knQ(NnvzOd)mgTgj@&$B0$iA{b`4b*5wzi+o9(2AL>Dde+5!# z{|Lo-=_#9!hD|eU0NYVDab5u8L@W?)MHPhRTBh(e8}UP(TFG2KOmsr+<+j8pn>R+2MPGHLPF@Fel$iD3{ejM zj=}ba571eWG&aMJ$!7R+IU$S?7CRaSF@qk`PA|ZdB@_Yw5+h1a50${;&>#VD+L3F? 
z1JW(mM86Z=fTl9uRM-v%F9T{J@k1fdi4m_8+mRtR3na<6&;odj{)0~W&*dP>zaljI zry<=|xgg#)2^odK9|velcL74nH4FiG9{WSZTX(xaoNotEJbHZngGq7&;K2c8dFP8X z-kBm2;x7%MEI$UCr?hE2->U6&1ZnufgZm!}CQbsNTq1_$15N?{Ai+CT01@5=ja;ig zh?3d@@~BBP-dP2b3lH^U`Z8(60@zOL$k_mde2cVZ0;92gRsVMBftj>pqW@ib&?ERC zN^P~8L&bT^Ix)5lkZc&;H;6uqK?~%tLfdIRIUAsmZ)v>45Cn#I2{g2x)3~f4Mrcb? zI6GJ{2f%OA+9|Bq>*%>K{y!bdUyfiDGhY)2n($O9tqFXYzyqj8cN z3sR&zFqA*?4Hoe^NM3}}x(x%09h9Of)d zMrZ^qd11kHUj|>4!Hs0GgEW2VG=F9=gO`ow8$f5aXYb@}P$>Bp6bki=>05^8GQ1f+!4DS!pXBM5628yfB(9!&FZ&qPVN0E~18vno0t z>rk!!tMC>@k$Ztq*PjrBqCqw&8uI>m!Tpd&uiZZt`uH+1b! zQ78vspa_f_Ufv;5FZ8BNQ(HME1~$k9o;G~J5x@e2c7HD7$H6kKR62|_gDUg2(31a*!*hau1q!FrG7}J;K%L-<}Ngykn?aQEr(9w+6PAyvH1I$+U zgz^4qjTe-Os?<^efk7Zu^NtNQ;V0$o)Pqx zVT{nWc)aR?en@!ywzf{h6>Ob~tJpe~SD|1QD|8kKmY2$Aa(pBHlN!Pd;~45Qg4;2G zmS*s9uml)H>jaWrbR3Oes}?Z+{a<16+uef2zwabgE6oB{tNTu3wdy;-YIWa9EV6L` z6#2H3NF>t(AktkYaY$wjIHbEy;_%Ff7&3rEy89~}VmP+o5O;rtLyW8z9OAB%IM{Mv zY5^S5T_U1UH>T;i-bxOD)%(#AsO(p&~8Uoy6d&fme@~>%YR_SqcxM^LEm*DS)=LT$AeI z2U~wQ&kfb$y)|xU(conO7~TciXyfx=-XS~#oy;*T`1$y`$Wvq+7RmTwNo*~~>pEzz zp*}5y6XDy2-J%U(&k7B1hZ_>k32wtCl}ls5*>D?n3pPSV`~d&o=0NDb%t9KGzpXxA zW`Ea!FjgdkjrMcFjNdkrn6Q7r6^x%Ybr+0@-_{F^_7{i(BSwvG$4=T5*ji3tKCRP9 z*7P^fpu%4vL-Jn%w^ot>xpn$qK?^!Y04qrQD@dLuTP#6<+lJ8v@HVE{wum z6KxA2vHq%nHbYVX--hv5;5=2M=~*x(2uk=Bw~NKJ%uT;4rG@!dg|wX?1tqlQ{1tqQ zGSJixQ;7h+EvL)yznB(yOG?)!2EnoPFMbP z)+G3H03_l20pd`HcPNVPh+4JNgqA#fX(B&?p+_8v^5G-fI~4lWIUt;;!};Z(PF2Y# me^3GPEhrJb_8?A%+Npj^^+1|W<7)e diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json deleted file mode 100644 index ce2220e0b756..000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "solhint:recommended" -} diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol deleted file mode 100644 index abbdba8d32eb..000000000000 --- 
a/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Errors { - bool public state; - - // Payable function that can be used to test insufficient funds errors - function valueMatch(uint256 value) public payable { - require(msg.value == value , "msg.value does not match value"); - } - - function setState(bool newState) public { - state = newState; - } - - // Trigger a require statement failure with a custom error message - function triggerRequireError() public pure { - require(false, "This is a require error"); - } - - // Trigger an assert statement failure - function triggerAssertError() public pure { - assert(false); - } - - // Trigger a revert statement with a custom error message - function triggerRevertError() public pure { - revert("This is a revert error"); - } - - // Trigger a division by zero error - function triggerDivisionByZero() public pure returns (uint256) { - uint256 a = 1; - uint256 b = 0; - return a / b; - } - - // Trigger an out-of-bounds array access - function triggerOutOfBoundsError() public pure returns (uint256) { - uint256[] memory arr = new uint256[](1); - return arr[2]; - } - - // Trigger a custom error - error CustomError(string message); - - function triggerCustomError() public pure { - revert CustomError("This is a custom error"); - } -} - diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Event.sol b/substrate/frame/revive/rpc/examples/js/contracts/Event.sol deleted file mode 100644 index 1e4ce7cf8765..000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/Event.sol +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract EventExample { - event ExampleEvent(address indexed sender, uint256 value, string message); - - function triggerEvent() public { - uint256 value = 12345; - string memory message = "Hello world"; - emit ExampleEvent(msg.sender, value, message); 
- } -} - diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol deleted file mode 100644 index 51aaafcae428..000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -// Flipper - Stores and toggles a boolean value -contract Flipper { - bool public value; - - function flip() external { - value = !value; - } - - function getValue() external view returns (bool) { - return value; - } -} - -// FlipperCaller - Interacts with the Flipper contract -contract FlipperCaller { - // Address of the Flipper contract - address public flipperAddress; - - // Constructor to initialize Flipper's address - constructor(address _flipperAddress) { - flipperAddress = _flipperAddress; - } - - function callFlip() external { - Flipper(flipperAddress).flip(); - } - - function callGetValue() external view returns (bool) { - return Flipper(flipperAddress).getValue(); - } -} - diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol deleted file mode 100644 index 0c8a4d26f4dc..000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract PiggyBank { - - uint256 private balance; - address public owner; - - constructor() { - owner = msg.sender; - balance = 0; - } - - function deposit() public payable returns (uint256) { - balance += msg.value; - return balance; - } - - function getDeposit() public view returns (uint256) { - return balance; - } - - function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { - require(msg.sender == owner); - balance -= withdrawAmount; - (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); - require(success, "Transfer failed"); - - 
return balance; - } -} - diff --git a/substrate/frame/revive/rpc/examples/js/evm/.gitkeep b/substrate/frame/revive/rpc/examples/js/evm/.gitkeep deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/substrate/frame/revive/rpc/examples/js/index.html b/substrate/frame/revive/rpc/examples/js/index.html index 97efebe180ea..72f7026ce5f3 100644 --- a/substrate/frame/revive/rpc/examples/js/index.html +++ b/substrate/frame/revive/rpc/examples/js/index.html @@ -1,38 +1,29 @@ - - - - - MetaMask Playground - - - - - + button { + display: block; + margin-bottom: 10px; + } + + + + + - + - - + + - - - + + + diff --git a/substrate/frame/revive/rpc/examples/js/package-lock.json b/substrate/frame/revive/rpc/examples/js/package-lock.json deleted file mode 100644 index 5c7db0abc936..000000000000 --- a/substrate/frame/revive/rpc/examples/js/package-lock.json +++ /dev/null @@ -1,443 +0,0 @@ -{ - "name": "demo", - "version": "0.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "demo", - "version": "0.0.0", - "dependencies": { - "ethers": "^6.13.1", - "solc": "^0.8.28" - }, - "devDependencies": { - "typescript": "^5.5.3", - "vite": "^5.4.8" - } - }, - "node_modules/@adraffy/ens-normalize": { - "version": "1.10.1", - "license": "MIT" - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@noble/curves": { - "version": "1.2.0", - "license": "MIT", - "dependencies": { - "@noble/hashes": "1.3.2" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@noble/hashes": { - "version": "1.3.2", - "license": "MIT", - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.24.0", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - 
"optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.24.0", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@types/estree": { - "version": "1.0.6", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "18.15.13", - "license": "MIT" - }, - "node_modules/aes-js": { - "version": "4.0.0-beta.5", - "license": "MIT" - }, - "node_modules/command-exists": { - "version": "1.2.9", - "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.9.tgz", - "integrity": "sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w==", - "license": "MIT" - }, - "node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, - "node_modules/esbuild": { - "version": "0.21.5", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", - "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", - "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - 
"@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" - } - }, - "node_modules/ethers": { - "version": "6.13.3", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/ethers-io/" - }, - { - "type": "individual", - "url": "https://www.buymeacoffee.com/ricmoo" - } - ], - "license": "MIT", - "dependencies": { - "@adraffy/ens-normalize": "1.10.1", - "@noble/curves": "1.2.0", - "@noble/hashes": "1.3.2", - "@types/node": "18.15.13", - "aes-js": "4.0.0-beta.5", - "tslib": "2.4.0", - "ws": "8.17.1" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", - "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/js-sha3": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", - "integrity": "sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==", - "license": "MIT" - }, - "node_modules/memorystream": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz", - "integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/nanoid": { - "version": "3.3.7", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - 
} - }, - "node_modules/os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/picocolors": { - "version": "1.1.0", - "dev": true, - "license": "ISC" - }, - "node_modules/postcss": { - "version": "8.4.47", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.7", - "picocolors": "^1.1.0", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/rollup": { - "version": "4.24.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.6" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.24.0", - "@rollup/rollup-android-arm64": "4.24.0", - "@rollup/rollup-darwin-arm64": "4.24.0", - "@rollup/rollup-darwin-x64": "4.24.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.24.0", - "@rollup/rollup-linux-arm-musleabihf": "4.24.0", - "@rollup/rollup-linux-arm64-gnu": "4.24.0", - "@rollup/rollup-linux-arm64-musl": "4.24.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.24.0", - "@rollup/rollup-linux-riscv64-gnu": "4.24.0", - "@rollup/rollup-linux-s390x-gnu": "4.24.0", - "@rollup/rollup-linux-x64-gnu": "4.24.0", - "@rollup/rollup-linux-x64-musl": "4.24.0", - "@rollup/rollup-win32-arm64-msvc": "4.24.0", - "@rollup/rollup-win32-ia32-msvc": "4.24.0", - "@rollup/rollup-win32-x64-msvc": "4.24.0", - "fsevents": "~2.3.2" - } - }, - "node_modules/semver": { - "version": 
"5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "license": "ISC", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/solc": { - "version": "0.8.28", - "resolved": "https://registry.npmjs.org/solc/-/solc-0.8.28.tgz", - "integrity": "sha512-AFCiJ+b4RosyyNhnfdVH4ZR1+TxiL91iluPjw0EJslIu4LXGM9NYqi2z5y8TqochC4tcH9QsHfwWhOIC9jPDKA==", - "license": "MIT", - "dependencies": { - "command-exists": "^1.2.8", - "commander": "^8.1.0", - "follow-redirects": "^1.12.1", - "js-sha3": "0.8.0", - "memorystream": "^0.3.1", - "semver": "^5.5.0", - "tmp": "0.0.33" - }, - "bin": { - "solcjs": "solc.js" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "license": "MIT", - "dependencies": { - "os-tmpdir": "~1.0.2" - }, - "engines": { - "node": ">=0.6.0" - } - }, - "node_modules/tslib": { - "version": "2.4.0", - "license": "0BSD" - }, - "node_modules/typescript": { - "version": "5.6.3", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/vite": { - "version": "5.4.8", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.21.3", - "postcss": "^8.4.43", - "rollup": "^4.20.0" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": 
"^18.0.0 || >=20.0.0", - "less": "*", - "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", - "terser": "^5.4.0" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - } - } - }, - "node_modules/ws": { - "version": "8.17.1", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - } - } -} diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index 6d8d00fd4214..4d7136606b65 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -1,23 +1,18 @@ { - "name": "demo", - "private": true, - "version": "0.0.0", - "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "preview": "vite preview" - }, - "dependencies": { - "ethers": "^6.13.4", - "solc": "^0.8.28", - "viem": "^2.21.47", - "@parity/revive": "^0.0.5" - }, - "devDependencies": { - "prettier": "^3.3.3", - "@types/bun": "^1.1.13", - "typescript": "^5.5.3", - "vite": "^5.4.8" - } + "name": "demo", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.13.1" + }, + "devDependencies": { + "typescript": "^5.5.3", + "vite": "^5.4.8" + } } diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm deleted file 
mode 100644 index 77de4ff3b1b3fe1f378ae31bbba24ddb38cc6300..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7274 zcmc&&4R9OPo!^yY?bAwjR*LLh*^VPCn?wnBOdO@HbIoyXMtPQU9HUiD>(X+GiH+AE z*<{xada^0wD^4vrShWR5ZF6v$kk(&ZhZ{T-lW>P9Ic&$w95(|)=GuaXIeKon_RO_M zO-RW7pX85(OEZue292KH>b?Ja@Be=P?~Q!puNbDel*xWrj(Mqy$uL!xt{)&eQUHz4 zZHM+HXjRan(C&qHsIIm3Kz&_%-GM_#j>gyiwH~RfU;EeA)X~t`(0-t;{fJT5*wEHc z*LuKcXg_9XZ7|w&`0%mzy8BuivK!HnWx5CIT8}jxXgBJ#w)-0lqoF>}X&Urd=)Qy7 zto02?TaU!8PfUEpG`RC#<^iUY8D=c(E_Ofr9kz=Nnhevp=^fLbntx-ivi#2Sc;0U7 zW|T(nq9NNJ{%!uW{S|w;h-WFg+(DOqA(Ya@f=Hu*6Y!-+b$-FB+-soimh-cYY;~uuQ`qHCMzH|)w zfB4nomIGF$+9N4npX8P5-jc`VSx2<`CopcxUjYoUH$n`tH-_vL2WgbpSnTVf`lr7H?In%7fCg*0)46rhw_#m$d0mEXD~9|p0L>J%NodbPdj{GlG+4u(&#f!o z1nY{I*UiJ+&!Jspj3Y+K|2os)ZGiqYMnBdB{i{r!*Jud&XBcl&n>XY?&(x?*ZMloq zM}*Rgmlf_`?Vu@aoX{(Qoa;f#_f)rj>t|2*nw9Dwk34idA4RLb|4&an`Xmriuxi9j zKd{Fet3XH=o+a=AItG0NdH}jH1U&8G zLlxthSuS%HWe7?oyqPmxU(B1elec)ib&=8GFuh4)<0BwN%CO#y*off$myZ1LW6_00Cl&=PuESyh7W1*_z@iK(_xZ8- z!p&Lyca&o>m}OA{!Ebr>Lxp=8hAE2@n?o_jVMD-lOfyOY=2oq(Bw(-8I*S9A2efvI zDBvurfaxIswO#9^s2&DbVc6LYz#ai)HGtL|aI|R;Qg|BxUl`@<0b!s*4VuAeZC0%_ zPiveFxd@k?yJK5!5~X&^A-&5Ia3>%S>?7#1^$Z#f7&Fvf(ZL&=SzP*c>C* znRI=en$Zby2_n~Z6V$fA(1!IE#Kt@0Y8SYfs{u}L1q-}yk>Smx8T`e_N=OqrV$9X7 zJy2h;#AKeg5m=cy%aay~%}k;2mMP%jS!kosPO>LT<~z>K*Z-qFCFy4c{duIHiJy(D zC$q=cy#bE5((oN%{Qh_UKm(+I{tpWGrWHaPjde8SDb*iAm?;39pFcYB;d9no!#QSM z!P#;cj;sRZiU`}=;IL-vXLM4#tv7K}f8vELML6v9c2fULozy7&u-d0{Qhkhq9?H9s zm%6?61BE-3cfAYvBE8g`mQQ-A9kc(da2XiB)=Po6{KCA{u4NY2ywuJuz~Y8w7O(eG zf6Ubz;OoKRtcTtO%?S=j0NmGsn<{|ud~j2aHgHPEz$t+j%Q~exaQr}Hh4H*5@cmxD zaf>EVZ(cQtd(2=D0Z5j2)%{-KYR%dFN|M5o5SCCF-4*K80~`%&5#h^!0Tf?02WVdG z@_%*N+tBplI>)k{5C5_3Zo#yA_FJYEbFBQ2-znVBOgVF`BBLQP5h2giR&-P(E2b)b z3O?|O=@EPP!w+rEbjH}}hq=s{V|otKj;HdL+5fXsu)~n)ChYI&0*V+M3G9E(|KP1T zlkIEVdm)j|B6r7D*tqOwM_1(m#bt17a5M{%Okj`pctcWYNRmR5J0uAq$q|xJNCKc` zDvZ6a|Bu4Wu{mD|9#f`S{&My4+2D>t&bORQ9|6C75!!3eW}szm7%w_#-gv`!5g(^< 
zB9n^~5FcoM8KcxIQVf;>*M$(lz*K4GS4+mK$an-Yzn;t)lo_*C3_-d`^SO@fhGpwJ0LsU%anyuQLYh*&U}a5g48%NK0ilsa(S+Hcrk_k&Jt$#4e zYIx{gIKReUYr8e9^K($&Em~Z;%5;DJ`%zA`F)IMwd1X=Ie!q&(r^}4b-z4VT^$cwI z^yeRPZY3{vT)k1miCp?`g?s)rVm8F3nbm+SN5=r@7IyFS6<7a4JWdgYUOan0m! z6>b8CX;8eyh!e1TwN6@OK&&Z%7?ck&$ngNg9O!vN>!hUyWYx6R$VMEk)51b349_Nr zKCFtl8c12|DQgko8=HWk;wv$A0hAp)1T9$gW+>>O{?HFygu-r~(OVZFwS>GA3dj|U zsD1A@3ir-s0>dY^R-NC9NGn+UBlh!i{3{8bRxgvo{P|%->Y;`Tp^Aqq#vvfYNQ=Pg zXKMASh<-MdNpqPyX%(7+S~8jSEI;J*=c=My@^XA@*10S}MRrdY92L8q#S9YI$uqQw zWIvB864EnB&fMJFE9H*Lu;+7yGikoaO!qbhB2VgcKWoGDA43ux!9fqU8u&tx4O(8lv zBxPplYlw>76}LTf&fNP1y);Nss>rFs|O9@*42K- zAceYm)F4H=nt(r|u699(bae_ko30K*$Ls1Sbaq`m37tb%UxO}RSD%HhKv!p=Tc@iT z=yZ6?VGuA$cvnSCI$X;jW*zj*AQl=J4U(tBy8(k(snr@Wj#iT^#(V$mzgD=-AFjr; zPZRGySXz$vLPSu1`zwWecM0?!WKGmckQY(?eF=dyd8t^lua=0q17uB_BJh_qg+QH2 zbJem&gZ@&Tg`~BX(lioytwfsCEX~1H8q#zW^pG_6Gnu%JG$$BPdeYLxEGs^!a5fhq zO@mC9ZYoDNnxi|pMn{^Tg)!LaH3oE*w9LSiKw2}*vH^fDk9)x+sF7#spiHK83pu*^ z6}kx%Eu{O-!A?**ciO4X15Ks{H)Z+)PptxW+74@J4kS!=I#Lsr+8mdf*rCl?Qxkq| zE-y9V)#faz2}zqXrzQk#&Xk%!+8nE0NDWtN7uKbQcW4(1Qp0}jLVjx4t6gxUh9&KS zJvA(77kF(JrG}6;YfB|7wOKKh+@Z}DrILPawlJ0SYP0TCQqpGEr;>s;>(XX~R03%; z&QxWkHnTBRxkH;NOI7-{nGLB*uQpSfs+6>ul2oOj%@k`dOQ}kvw@R!w;{m&f2+h_7 zg}d@W4D=~r14D$^u$tNO@ZK=MwEAybpa$By1v;=It!#n$QMYW{F1w?l%|G((>JEm| zr{y3jxAga)mfw)Kg-`Vdf88H>x<7hbeYDu=+-3`-@*wgCk+R84Kgo2>rf}HD4+u^t z8!*NoY`u6_Gtrw?`ur{QRy&zDRjO;KL`0#TNdiZ`77fi*H!4){KH4PkZ#Z1Q^OZ{02v^M8Oo BH1Gfb diff --git a/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm deleted file mode 100644 index 6dbc5ca8b108c1ad04cc248b735b2d7d4f43f2a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2615 zcmcIle{3699e;Or{LXgTUR)>lnx@Sqa=PFY!YUJ0Prjl}w%1O9Atl*te9m0Q zm!$PO(?8Ow$d29IQmdkFR*7y(6(_3zfv0U$0wYDN8UpQ)ff#>9Xc`h4QiTvI{bjy) zX;UOB{^R8Ddw%bIzn|~#_o6R8htRMKSs#bduO?6vg>C*wGLxB>^V2Usy>{(<*9RXR zvwbP}OuQJn3hY 
zQ*)CqXOb4+>B-Eg^~4fi`gcP+lbE!|jtTV5^>$DGeMbZ4T#%~UESu5z8rzMc5+(v^SspNTmS zKKZ04=Iqv&TU?{=p1D!E+-j`0XMdxwx8lxQL#WbmOA`gHvv7(F;gAp&`Qg?P96-E_ z=Z@eJ#DA?VjBK$s92M=(sOb0%B$}EyqICweE`OynTfaCzDw)op8 zfPYU2GnD{i+e2q}9(xeSc9d5kd0faDLLwYhI^>on{t;=NA0e&lCaQF9qK95WJhT)$ zWX9zM$zIS?Cb)~^PN%M*#k}mbYW&^TKmFUK;&&domwS?XxhK2#RhDA;{0Ye(*-D0I z{eBGhTH(}7mWkV@dtbjF$E*#C#|%f;Sz!FS^RCYfN zLMu#Vihl_y{u0nU+D@WdE7m~uRd648du+hVT8F`Xo~4;GSVuFNgfjRZQU-s5v>LEI zERg-5+>7DoTWpH4)d;JH*;Dy2A7F^6mGu2BEuGUc zQSH?+eH9dHsR?aLjf(Pl0OMK^Gd4BRSQRMgJw(a=044h<>7isVCEb+lp@dK(QPM?8 zCna7=c2nY^q=OQXl3kSSq+|yr?UcAFaZ%!=1duQ#4occ6u~TBBgr|gKsWFz0Hg&fb zZ?;-3vzA#}-Ow>h=gODL?@}R#XCG*-{PN*iKh}r@pcA%CqeO_?|X6ck+rJ2!Jq?jL9VhsDnhiQ3naPQ3nZ^ zQ3nYpn~G!$#L^&H8oDeEot6f#rD3J`HMS^)P6=`0=ni|%o{LQ)m)oJ8q zEggUxY7t7-yrDD@gqTg_)d<8)Q=vWlpZ+U`zje=S7iO*xd<<*l9`qJVnPIGy;2;eL zPqp1)Db*|!#&5i4S_S&yQg19cf2q4Rboa08Zl*@To(2;d757HPwZR0I8Dpt%<4k+w zrqBSR-V?m8NA7LB}ITI?d+cpm~0A344VfKFmXnjoi(va9OC}|5cdAm|BM^s+LX~x9$5F zHqk-<`?o{zL>_Fw#6`ttuHQ!2&iR}NeBO9uK&f8zeR=I-Vx6Oj$7q$hh&GC+abQ4Es-r@ma-zDj;<_;9KM{`z F=-*Hb7FhrQ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm deleted file mode 100644 index 488ee684f0c4aee5d64f8b691d048ebefdd51044..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1738 zcmbVMU2NM_6ux$x-WzutPDq@)|1|QdwtJ!V5G#W%pmNt+G$}D1DuSuamMm$>);UdQ zJC#sCD=kEWENrOT)5KD>=@V#^x}TR!;sGHc!NVjT`+zEekU%_5E3BL()2fNRFe{(! 
zdwlMf^L^*o(YKEv^nD9z90EpnBdCTVQsHw_k8A<#0CWLf1VjJ^;27XUJd+tt#B=fC z6XR!`ja6nmp4eDTjOLT6WNvsOH=d2Bk`u{zW;mP7O=eF|Ky>ufsmWaYcqZ8(sz)B> zosDNElf$`e{Pe`hWHy^jxV_@^Ua8JI-P4jtp2>_mEg!e~#OVXa&{;Hvt{_R+CkzPh z2?Zf9&WncYf@`yMTxxaqxL+w5HrG)76s@27k?4ma z8b`rQD+loi5{tnVDA!EUgvo=H2NfP996Oeb1$&Iz3yQr+?JBXCYICHvfNL|@z;Eif z0Q6Bs7T}+_ol+dDfPh0Mx^5B5J1M}o8DOgZU@?pfEd>;gjD zg4Cvo&|iP?pBVmK$flHY)_H3`n8`azh8XSkZ?>c}$)8~Km0NTXPK{qGp|H^;8uUL3Nk5ql}xJ=Hx^ zTPC%tr~5L@;AgX;+AYud%OSWo$SQIy*lVU>7`NqM`#n^*#Q{(36uDuO)1T~SbQhyL z8QsC?c1E``x|LDNXpm8r(Ey`{#p#bW7Ws{*)87GaQJ9Y@-@E!yRNMnco`wyJ_mpfw?l{9 z3+NdvulK$=Ok;|@U@sE8s@O|3KediNxFh5(v|d}ry!|Hde+^g!Tt&t0EBQMsiEqr? z)V!sbH;K9I+;-Ae8(gh*oh{8mLk}Lr@Vo1E`MB|3H}2q7mL&+Fa7TDehfh~{CtQT6 zq6OB_{5j3jrTLBiJ@esx=lc4~6|Il7Nu^RnyQg*XuS$Ktm7>>5#@>Xn&Fk&-cwZ-N zg_2UfQYaL(J=J#3!_yicF!+G)(%^vBHxLeo+k91s94VLkWL~~lDPPpOsu#7sa;03> wV!rSmUe+G}I@|aWqdW#Von7C~&Q`Qb6)kpQfQK*C7Xf~BcJPa?PEA7p0;=WV!2kdN diff --git a/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm deleted file mode 100644 index 585fbb392a314c15a35e7e529106738bde3be02a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4532 zcmcInYiu0V6}~gO>pMGk%-DF>lLutH8Lyl)0&kYmq=36vo5{>N!0h0_mKJOiZ)Vpq ziD%+u@7Po_)Ros+*0Q&uwo@vlk&z$$(MUC~L{*l`a>7fZt!P6fR2T7ss#PICOPY|l zXU1_7N`4eXS9|7h@44sRbIx}jqdssCLbDaf`B50X96?1CareEm!X9x0RRRe>w*Y+$ zs1fKspcbILKs`WD0gVFfZ0qc_sinQGyRBvC-Y)CvTj$=k_N#CE(mlzYd)t#O_Jf}6 zZrRtp*KE_1W^z|cclxnpOJ{Odx0&qTZ|=D&?AmvG-M(a7XUo#yJ`j<9?6LjbZI5;) zofY=TWuXV!I`=1Ay3Mvd`*tNwGuiH`poez5?b8gICGE+s&b?O2@Z?5%sP|!X0QI8Z zp_SAflt%5O?xS<`3HmRz$5rW$xf#z6kH4h0)2b^ zROw{ty0W{d_~k#vC!^U{0YUcDt=k)A0QN+9L1p4AkmnglT*XMI)*|SL$3=BfO?9bDE=TU-|sSIL3>lqn=F4WGX?XBVS5f@T;5NGqU%kul*ST`Oh|59Q@~KxLkW}dodtqgm1vu5X01q`T0K*mr zUaJ7~&abZl16qu9!hudR?|Q{$bf}gtP+^9psf>On72kIJjWb6MxZ>M>z5jb3U`pHe z!`XAYCR|HCGj#cLZUcP)+8r{P_@C8z?)KoRPC z80ax{^|~0aW^4dN0H#ashgc$6{naaQUGwpUIQGEK5da6oPbfoDawtpE 
zYG|BvB$N$($e12O#?&w}CbL&ze)9=f_XbV$ghgy<*th`OJ5c>{YVQLfOKNw}N{|3` z0i}Ri>93%6E4{3CN+kmvoP`W9h8`ghR-pD$)(|e9kK-F?P|yKxgiNbsCJ}l(lnafA zW{NN2;;T%NIM6+X!7xtp`7>pt1*5ah*j;aQ@YW=RyMyGwF%b3}z-a=IJWCjfUn}u9 zOZ-g|zeeI$OZ+N{Un%jt#8*qaU*dfdUnTK3N_?fnS4cc3@i$0(xx}xK_%ewvm3UU- zy%G;{u*8>0yhq~Q67Sj}@w6uK)K8F+;v`03OTcNNOrAhY%c1chC%_!PH~|u2y_f+3g`$OX50{5DkpyM?>MFa)PnOGT!7b)kQ^-1f*f3+`JCPd zDfc~C^C7(`2hY)qO7MMpF{dAa;m>H20&pRh9CW+fMv8Ie^dgLxxh#EwW+@H7OS25M zqgpUsEI2}iQ$V&k1~i2X{Rp!2jQ%Fv;H;Ltb5ORU+P%uo=V7#=?$dL2UfJ-`ISv2K1IHl{X(ds*|!l0HQUDHFdz^LVye{v;1~e# zwm-O*8iCti{#P9T{$eJTSS~@A{j_-sCXQbu=>!=C4FLi&8B37yhyn2lNjKtDc8CQ? z6YU4d?nv=P2I7@;%Eec%S1R{j1R($!4JR*JlbICAlp%|&+_%328^tez9lLY^Qa9us z87ioi5?oLdaRvtK>}Wj)p#Bi(eV_#-IBFuWJE#eVu+`+KoQ|kmYGEnRcuM8&{0HO( z;Dx-<@W+3~@n1h9?VP!-!jw5vsWf9Z<}(bW!~8gxpW&g#1C~wMXDU@@%x^YT=d%oy zwEPU0KfxD|v&AVU56~yHDO09m#%Qz2^+gs_@t85U*;Ha~2f=pdkUzoYU+0Ujm0oTg z23@AI(v0zD(<*cO>M!z`v75}MHRkr4&24M*GYssUf1S%`_~I$M$+9!JvD4%@ z1!DTba>z!0_DLN7^1^b+{x6Nu6Bn$EQ&D74BJp*fD^Y&4di@y7D9Dj1&hWF*7zO0mV8fN}<{%gWF7*AuKIuF}B5P$Og=O8YB zANJZ~&vxk%yUSRHcm&G34ey$;V6Q9{0>Y@0pc>*)#WC$y^T4ZlY(8 zGVs|nd!VFW?@yf4nPL+6zjlh0YNaNrX6Cx^$!D0kXT0+R-np&#Y>xm-mZe7G^tp{bne1_=*_J+QpSCD<3Aupl_>fSwTPd@Mt+#sza z-a3n(?H9cp#41g$eKv4ge>9uT9um_9ad23S7DR2Tk?~d(3+G;tQEvqR(xw%y&tY>Y+WEwThqwYxVg&4qf#UiDHNK2 zJRy!WMx$CGrWK~-S|tYiXt8Kij0V1`C^L#ykZS^g^-Aq}rFjDO@s5bou*-;8TT>8~ zEB~k*Q+exO#7I2{O= diff --git a/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm deleted file mode 100644 index 3f96fdfc21d8d8fc5bf68a34f0c9f7b055afa2a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5062 zcmcIne{37qeZM=Ax_3v(KFX5r&Ze!4Mys@11Y~Hebe6fSz&zgx@j!CvMr2tn*_4m6 zm5Wc7&396Vshu)XV$I2%3d_oX8LbTK2he2z4a-RYF)e5PaEqmX6c~yD6Hqi*w_!=U zt#N+X+V_rOB(rC}P zK$<`lkS9S7gM10(Igl4Y20*eP;~-NYr+NnlY&ntc9qK)C>g99R&eOolz3H8&^Zmnp z_EX=W-ZymO{LssTy?WnZ-{})W{V(;M80b4aG}t$EVbIvo^y|;|?(==U11D;e=RrmP 
zOD|m*>ODEo=j^a&wv=A%9k|eUVra0}IDfitaIi1!_A--y?6MD^{Azu=@7%!4R(&|wPf_&r#F6qevT|+1*^TGF2orpmeTq7Q zD2kaO&A-pWm?mLXp_pDGX-R}3}2wbY#7A_=EhMLQ4C*4A7urK`8~Hn z)g|0)TySx5q5d7Z;8iwF;h!k8w8`Q9OBi$NiqhX}P1$25d!G4zxBhd0h zUCRE7+g~chGy@XnGiqjnN7){VDTo_@+0-LxQxhdk6f_ZOtd->(XmTb(ZU|&fBJ+qW zRHjko23yIoNfy(V9_3v`SF@Qo>5qo8mxh&4zL-}kbI8m@idiKzeQ8{&TwzT;0Mlkh z(OQYtdQNN2XstbT?a)U~Ob?s7R1oh0m`CO*+FGzitV~as+YGb3n`ESmxn`1=E@c(c zZy%v`OjBCNBuE})l%fmr#_-jR^ks8eGOq|k513aF(Ie&+YsSiqQnn(G0=hg}#}e## z4P*}F8ubaVtYIs2?bo)1Sx^8@;+Y2twwvfdD?3Vo^};C3@ict|HB(FiHG(UsIxr#+ zKCNwN_|g3YD~DN3 z#-G0ZJt}eRPwrm7^rO1uvHX8r{r=b7+pJI@@x>M^)K6wbmz(IRZ1xXD*AchYkz%xt zIEV@o-D24z%4~#R0|wc0d5YvDFfz#rrXD3ZWa=u(!S=v~`oyu<;0ABGlE;q! z+1@9==lU$j^FU<>x&O~ymTvpY#WO^owjE3KNf@F;&%>Y+cnjzLCUHKE(8^=(S7VjU zXk8%$!X`+FnF(CX!W|V$uTZ>;Ci*HxG8HP@|27rM7VRLrR8T6f^FaR4)#6o~{t#Xe z2g}ol^aqx&*a1p1!R0yRMCy0zw2phg?e{?b6=dU+IBt6)_!y)c4*vlu;0cl;iCAP< zAcJ;HEf0CBLk;9qwEDCi>UVB6KvGy9q^g5}B4zdmtK-}qj$M8lRhH15Ttl)!c9Bf< z<^;M)_$slZ;pP}3X9G8vUj)U*)t8iPky<5Cj6Ty>Daqjg9o* zn@f`Ux?tWy=56bBD?>S4sYSJ=D>U!Q>OmT=1;<07(D_g|CNaa3fqK08X{y zJC^88NGSAX0d|}Eo1iFe>J?a8g~wI8ppU}%14yCQXdbx4BRa|cyUoBP`?GquF9eJL zf`#Oqa=EVva(RF6EW7? 
ziZelD{XlUhV65*i&M3yZuQ($a>-&l`g0a50coi9Ivau=@$0Np?SR4-;YpuocfU(w6 z99N9B=Hj?ytnDd|3&vVgF^`N@ud#xPBx0=YE|Q?Jx~oV6#%d!JMspx#tTX`p^sO)z z2;6?(vH4d-S5u8#zWMi)?9W+%~dwKB{lI0Fn4 zh8E|*;;#~H{U*u8HA&JWL6eY%{y-fu!>r5cZIlUE@UsVDyN~hGux) zAoFC(9JD)tQOAVa+AYlud#|8F7I}@yvyNoqr>F}hsz#^%e=#7&RB7% z%sN!Acc|xyhr;`3$TnPNje<#?d?`JcqFlV=09FGES z$aau_w@ujjCM*%}J0^^hIhD+(?BE{^{YB{2&;qchkZY^v5cGrv>q}LOh2|MLyYdaT ze48`RM(z4?8-FClAe@n zIlxr|{K|}L`57M4%gPk=vt2GT1K0)glw_U+s5Sq7p$)(de@t{_`$e$*60rRe1Y~F9 z=Ibf*cHCT2!DVDXs;mOXBjk(%I=2$m+E&6cZ$*>nsgJX|trkjBTzK*y{wu*Mpg9hu z8t&xeD}zW_6tUQf#TG0!V{s1_o3QA`qJYKSSlorhMl3d9(St=EiyRga7FjITW6_O8 z7Z&TV$Y7DitnFB<&33GHz;>*)-*&9!vmI;gvmI;gwH-@j@CM01`yc`2W^QA1bJPBF zK+Z&~GaPjP6Y~?};H?BV^br(ANM1?y4E%zgSIK6eNJh*|G%f_+|7C(r!aUq2vf$k2 zTxNr|K%moQq0QWYQ=&7uwhscGA8lLrskdO4@1yPLiIVv^Sx7Hnzy;0oz}~B=~X*8&L2658IoM!M1tN!4`9{rQ*WVfAwC1{Wq8dY^j>vJ_EEg zgi#;F(EtQeKSWY5gwkndG9adUjS&c{=ffxsc;dn{2wE|p`>C$LuimvwR@X0J zpyNX9=kHS2$NXHUzd5NMyxx9zESAsbr^e6|V^mXVYIeqdaC~ZZ%70KR#ge7&WNB`; zuvCltT_}bxwL!_>`LbSm-1?N$={+K@A7M%hqR`H zPn2e-w!uEsJ6rqJ<^Rg@rgkj}*LTjG_a6j1`n9fyTm0PR9YBr=w7+&u&NgYV$gjmN z#ccQs_Vy$w+5tX>G)7H!x5FyX_i!HOI!pf8mep$WqyFpmvaLn2Sj_MBUiLpxI{b=S NQaG=FzSPx4{V!`iF+czS diff --git a/substrate/frame/revive/rpc/examples/js/src/balance.ts b/substrate/frame/revive/rpc/examples/js/src/balance.ts deleted file mode 100644 index 1261dcab7812..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/balance.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { walletClient } from './lib.ts' - -const recipient = '0x8D97689C9818892B700e27F316cc3E41e17fBeb9' -try { - console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) -} catch (err) { - console.error(err) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts deleted file mode 100644 index a37b850214b8..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ /dev/null @@ -1,96 +0,0 
@@ -import { compile } from '@parity/revive' -import { format } from 'prettier' -import { parseArgs } from 'node:util' -import solc from 'solc' -import { readdirSync, readFileSync, writeFileSync } from 'fs' -import { basename, join } from 'path' - -type CompileInput = Parameters[0] - -const { - values: { filter }, -} = parseArgs({ - args: process.argv.slice(2), - options: { - filter: { - type: 'string', - short: 'f', - }, - }, -}) - -function evmCompile(sources: CompileInput) { - const input = { - language: 'Solidity', - sources, - settings: { - outputSelection: { - '*': { - '*': ['*'], - }, - }, - }, - } - - return solc.compile(JSON.stringify(input)) -} - -console.log('Compiling contracts...') - -const rootDir = join(__dirname, '..') -const contractsDir = join(rootDir, 'contracts') -const abiDir = join(rootDir, 'abi') -const pvmDir = join(rootDir, 'pvm') -const evmDir = join(rootDir, 'evm') - -const input = readdirSync(contractsDir) - .filter((f) => f.endsWith('.sol')) - .filter((f) => !filter || f.includes(filter)) - -for (const file of input) { - console.log(`🔨 Compiling ${file}...`) - const name = basename(file, '.sol') - const input = { - [name]: { content: readFileSync(join(contractsDir, file), 'utf8') }, - } - - console.log('Compiling with revive...') - const reviveOut = await compile(input) - - for (const contracts of Object.values(reviveOut.contracts)) { - for (const [name, contract] of Object.entries(contracts)) { - console.log(`📜 Add PVM contract ${name}`) - const abi = contract.abi - const abiName = `${name}Abi` - writeFileSync( - join(abiDir, `${name}.json`), - JSON.stringify(abi, null, 2) - ) - - writeFileSync( - join(abiDir, `${name}.ts`), - await format(`export const ${abiName} = ${JSON.stringify(abi, null, 2)} as const`, { - parser: 'typescript', - }) - ) - - writeFileSync( - join(pvmDir, `${name}.polkavm`), - Buffer.from(contract.evm.bytecode.object, 'hex') - ) - } - } - - console.log(`Compile with solc ${file}`) - const evmOut = 
JSON.parse(evmCompile(input)) as typeof reviveOut - - for (const contracts of Object.values(evmOut.contracts)) { - for (const [name, contract] of Object.entries(contracts)) { - console.log(`📜 Add EVM contract ${name}`) - writeFileSync( - join(evmDir, `${name}.bin`), - Buffer.from(contract.evm.bytecode.object, 'hex') - ) - } - } -} diff --git a/substrate/frame/revive/rpc/examples/js/src/event.ts b/substrate/frame/revive/rpc/examples/js/src/event.ts deleted file mode 100644 index 2e672a9772ff..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/event.ts +++ /dev/null @@ -1,29 +0,0 @@ -//! Run with bun run script-event.ts - -import { abi } from '../abi/event.ts' -import { assert, getByteCode, walletClient } from './lib.ts' - -const deployHash = await walletClient.deployContract({ - abi, - bytecode: getByteCode('event'), -}) -const deployReceipt = await walletClient.waitForTransactionReceipt({ hash: deployHash }) -const contractAddress = deployReceipt.contractAddress -console.log('Contract deployed:', contractAddress) -assert(contractAddress, 'Contract address should be set') - -const { request } = await walletClient.simulateContract({ - account: walletClient.account, - address: contractAddress, - abi, - functionName: 'triggerEvent', -}) - -const hash = await walletClient.writeContract(request) -const receipt = await walletClient.waitForTransactionReceipt({ hash }) -console.log(`Receipt: ${receipt.status}`) -console.log(`Logs receipt: ${receipt.status}`) - -for (const log of receipt.logs) { - console.log('Event log:', log) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts deleted file mode 100644 index 3db2453f2475..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts +++ /dev/null @@ -1,177 +0,0 @@ -import { spawn, spawnSync, Subprocess } from 'bun' -import { resolve } from 'path' -import { readFileSync } from 'fs' -import { 
createWalletClient, defineChain, Hex, http, publicActions } from 'viem' -import { privateKeyToAccount } from 'viem/accounts' - -export function getByteCode(name: string, evm: boolean): Hex { - const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) - return `0x${Buffer.from(bytecode).toString('hex')}` -} - -export type JsonRpcError = { - code: number - message: string - data: Hex -} - -export function killProcessOnPort(port: number) { - // Check which process is using the specified port - const result = spawnSync(['lsof', '-ti', `:${port}`]) - const output = result.stdout.toString().trim() - - if (output) { - console.log(`Port ${port} is in use. Killing process...`) - const pids = output.split('\n') - - // Kill each process using the port - for (const pid of pids) { - spawnSync(['kill', '-9', pid]) - console.log(`Killed process with PID: ${pid}`) - } - } -} - -export let jsonRpcErrors: JsonRpcError[] = [] -export async function createEnv(name: 'geth' | 'kitchensink') { - const gethPort = process.env.GETH_PORT || '8546' - const kitchensinkPort = process.env.KITCHENSINK_PORT || '8545' - const url = `http://localhost:${name == 'geth' ? gethPort : kitchensinkPort}` - const chain = defineChain({ - id: name == 'geth' ? 
1337 : 420420420, - name, - nativeCurrency: { - name: 'Westie', - symbol: 'WST', - decimals: 18, - }, - rpcUrls: { - default: { - http: [url], - }, - }, - testnet: true, - }) - - const transport = http(url, { - onFetchResponse: async (response) => { - const raw = await response.clone().json() - if (raw.error) { - jsonRpcErrors.push(raw.error as JsonRpcError) - } - }, - }) - - const wallet = createWalletClient({ - transport, - chain, - }) - - const [account] = await wallet.getAddresses() - const serverWallet = createWalletClient({ - account, - transport, - chain, - }).extend(publicActions) - - const accountWallet = createWalletClient({ - account: privateKeyToAccount( - '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' - ), - transport, - chain, - }).extend(publicActions) - - return { serverWallet, accountWallet, evm: name == 'geth' } -} - -// wait for http request to return 200 -export function waitForHealth(url: string) { - return new Promise((resolve, reject) => { - const start = Date.now() - const interval = setInterval(async () => { - try { - const res = await fetch(url, { - method: 'POST', - headers: { - 'content-type': 'application/json', - }, - body: JSON.stringify({ - jsonrpc: '2.0', - method: 'eth_syncing', - params: [], - id: 1, - }), - }) - - if (res.status !== 200) { - return - } - - clearInterval(interval) - resolve() - } catch (_err) { - const elapsed = Date.now() - start - if (elapsed > 30_000) { - clearInterval(interval) - reject(new Error('hit timeout')) - } - } - }, 1000) - }) -} - -export const procs: Subprocess[] = [] -const polkadotSdkPath = resolve(__dirname, '../../../../../../..') -if (!process.env.USE_LIVE_SERVERS) { - procs.push( - // Run geth on port 8546 - await (async () => { - killProcessOnPort(8546) - const proc = spawn( - 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( - ' ' - ), - { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } - ) - 
- await waitForHealth('http://localhost:8546').catch() - return proc - })(), - //Run the substate node - (() => { - killProcessOnPort(9944) - return spawn( - [ - './target/debug/substrate-node', - '--dev', - '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', - ], - { - stdout: Bun.file('/tmp/kitchensink.out.log'), - stderr: Bun.file('/tmp/kitchensink.err.log'), - cwd: polkadotSdkPath, - } - ) - })(), - // Run eth-rpc on 8545 - await (async () => { - killProcessOnPort(8545) - const proc = spawn( - [ - './target/debug/eth-rpc', - '--dev', - '--node-rpc-url=ws://localhost:9944', - '-l=rpc-metrics=debug,eth-rpc=debug', - ], - { - stdout: Bun.file('/tmp/eth-rpc.out.log'), - stderr: Bun.file('/tmp/eth-rpc.err.log'), - cwd: polkadotSdkPath, - } - ) - await waitForHealth('http://localhost:8545').catch() - return proc - })() - ) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts deleted file mode 100644 index b9ee877927bb..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts +++ /dev/null @@ -1,315 +0,0 @@ -import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' -import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' -import { encodeFunctionData, Hex, parseEther } from 'viem' -import { ErrorsAbi } from '../abi/Errors' -import { FlipperCallerAbi } from '../abi/FlipperCaller' -import { FlipperAbi } from '../abi/Flipper' - -afterEach(() => { - jsonRpcErrors.length = 0 -}) - -afterAll(async () => { - procs.forEach((proc) => proc.kill()) -}) - -const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) - -for (const env of envs) { - describe(env.serverWallet.chain.name, () => { - let errorsAddr: Hex = '0x' - let flipperAddr: Hex = '0x' - let flipperCallerAddr: Hex = '0x' - beforeAll(async () => { - { - const hash = await env.serverWallet.deployContract({ - abi: ErrorsAbi, - 
bytecode: getByteCode('errors', env.evm), - }) - const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) - if (!deployReceipt.contractAddress) - throw new Error('Contract address should be set') - errorsAddr = deployReceipt.contractAddress - } - - { - const hash = await env.serverWallet.deployContract({ - abi: FlipperAbi, - bytecode: getByteCode('flipper', env.evm), - }) - const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) - if (!deployReceipt.contractAddress) - throw new Error('Contract address should be set') - flipperAddr = deployReceipt.contractAddress - } - - { - const hash = await env.serverWallet.deployContract({ - abi: FlipperCallerAbi, - args: [flipperAddr], - bytecode: getByteCode('flipperCaller', env.evm), - }) - const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) - if (!deployReceipt.contractAddress) - throw new Error('Contract address should be set') - flipperCallerAddr = deployReceipt.contractAddress - } - }) - - test('triggerAssertError', async () => { - expect.assertions(3) - try { - await env.accountWallet.readContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'triggerAssertError', - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.data).toBe( - '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' - ) - expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') - } - }) - - test('triggerRevertError', async () => { - expect.assertions(3) - try { - await env.accountWallet.readContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'triggerRevertError', - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') - expect(lastJsonRpcError?.data).toBe( - 
'0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' - ) - } - }) - - test('triggerDivisionByZero', async () => { - expect.assertions(3) - try { - await env.accountWallet.readContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'triggerDivisionByZero', - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.data).toBe( - '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' - ) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: division or modulo by zero' - ) - } - }) - - test('triggerOutOfBoundsError', async () => { - expect.assertions(3) - try { - await env.accountWallet.readContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'triggerOutOfBoundsError', - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.data).toBe( - '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' - ) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: out-of-bounds access of an array or bytesN' - ) - } - }) - - test('triggerCustomError', async () => { - expect.assertions(3) - try { - await env.accountWallet.readContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'triggerCustomError', - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.data).toBe( - '0x8d6ea8be0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120637573746f6d206572726f7200000000000000000000' - ) - expect(lastJsonRpcError?.message).toBe('execution reverted') - } - }) - - test('eth_call (not enough funds)', async () 
=> { - expect.assertions(3) - try { - await env.accountWallet.simulateContract({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'valueMatch', - value: parseEther('10'), - args: [parseEther('10')], - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(-32000) - expect(lastJsonRpcError?.message).toInclude('insufficient funds') - expect(lastJsonRpcError?.data).toBeUndefined() - } - }) - - test('eth_call transfer (not enough funds)', async () => { - expect.assertions(3) - try { - await env.accountWallet.sendTransaction({ - to: '0x75E480dB528101a381Ce68544611C169Ad7EB342', - value: parseEther('10'), - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(-32000) - expect(lastJsonRpcError?.message).toInclude('insufficient funds') - expect(lastJsonRpcError?.data).toBeUndefined() - } - }) - - test('eth_estimate (not enough funds)', async () => { - expect.assertions(3) - try { - await env.accountWallet.estimateContractGas({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'valueMatch', - value: parseEther('10'), - args: [parseEther('10')], - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(-32000) - expect(lastJsonRpcError?.message).toInclude('insufficient funds') - expect(lastJsonRpcError?.data).toBeUndefined() - } - }) - - test('eth_estimate call caller (not enough funds)', async () => { - expect.assertions(3) - try { - await env.accountWallet.estimateContractGas({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'valueMatch', - value: parseEther('10'), - args: [parseEther('10')], - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(-32000) - expect(lastJsonRpcError?.message).toInclude('insufficient funds') - expect(lastJsonRpcError?.data).toBeUndefined() - } - }) - - test('eth_estimate (revert)', async () => { - 
expect.assertions(3) - try { - await env.serverWallet.estimateContractGas({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'valueMatch', - value: parseEther('11'), - args: [parseEther('10')], - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: msg.value does not match value' - ) - expect(lastJsonRpcError?.data).toBe( - '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' - ) - } - }) - - test('eth_get_balance (no account)', async () => { - const balance = await env.serverWallet.getBalance({ - address: '0x0000000000000000000000000000000000000123', - }) - expect(balance).toBe(0n) - }) - - test('eth_estimate (not enough funds to cover gas specified)', async () => { - expect.assertions(4) - try { - let balance = await env.serverWallet.getBalance(env.accountWallet.account) - expect(balance).toBe(0n) - - await env.accountWallet.estimateContractGas({ - address: errorsAddr, - abi: ErrorsAbi, - functionName: 'setState', - args: [true], - }) - } catch (err) { - const lastJsonRpcError = jsonRpcErrors.pop() - expect(lastJsonRpcError?.code).toBe(-32000) - expect(lastJsonRpcError?.message).toInclude('insufficient funds') - expect(lastJsonRpcError?.data).toBeUndefined() - } - }) - - test('eth_estimate (no gas specified)', async () => { - let balance = await env.serverWallet.getBalance(env.accountWallet.account) - expect(balance).toBe(0n) - - const data = encodeFunctionData({ - abi: ErrorsAbi, - functionName: 'setState', - args: [true], - }) - - await env.accountWallet.request({ - method: 'eth_estimateGas', - params: [ - { - data, - from: env.accountWallet.account.address, - to: errorsAddr, - }, - ], - }) - }) - - test.only('eth_estimate (no gas specified) child_call', async () => { - let balance = 
await env.serverWallet.getBalance(env.accountWallet.account) - expect(balance).toBe(0n) - - const data = encodeFunctionData({ - abi: FlipperCallerAbi, - functionName: 'callFlip', - }) - - await env.accountWallet.request({ - method: 'eth_estimateGas', - params: [ - { - data, - from: env.accountWallet.account.address, - to: flipperCallerAddr, - gas: `0x${Number(1000000).toString(16)}`, - }, - ], - }) - }) - }) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts deleted file mode 100644 index e1f0e780d95b..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/lib.ts +++ /dev/null @@ -1,128 +0,0 @@ -import { readFileSync } from 'node:fs' -import { spawn } from 'node:child_process' -import { parseArgs } from 'node:util' -import { createWalletClient, defineChain, Hex, http, parseEther, publicActions } from 'viem' -import { privateKeyToAccount } from 'viem/accounts' - -const { - values: { geth, proxy, westend, endowment, ['private-key']: privateKey }, -} = parseArgs({ - args: process.argv.slice(2), - options: { - ['private-key']: { - type: 'string', - short: 'k', - }, - endowment: { - type: 'string', - short: 'e', - }, - proxy: { - type: 'boolean', - }, - geth: { - type: 'boolean', - }, - westend: { - type: 'boolean', - }, - }, -}) - -if (geth) { - console.log('Testing with Geth') - const child = spawn( - 'geth', - [ - '--http', - '--http.api', - 'web3,eth,debug,personal,net', - '--http.port', - process.env.GETH_PORT ?? '8546', - '--dev', - '--verbosity', - '0', - ], - { stdio: 'inherit' } - ) - - process.on('exit', () => child.kill()) - child.unref() - await new Promise((resolve) => setTimeout(resolve, 500)) -} - -const rpcUrl = proxy - ? 'http://localhost:8080' - : westend - ? 'https://westend-asset-hub-eth-rpc.polkadot.io' - : geth - ? 'http://localhost:8546' - : 'http://localhost:8545' - -export const chain = defineChain({ - id: geth ? 
1337 : 420420420, - name: 'Asset Hub Westend', - network: 'asset-hub', - nativeCurrency: { - name: 'Westie', - symbol: 'WST', - decimals: 18, - }, - rpcUrls: { - default: { - http: [rpcUrl], - }, - }, - testnet: true, -}) - -const wallet = createWalletClient({ - transport: http(), - chain, -}) -const [account] = await wallet.getAddresses() -export const serverWalletClient = createWalletClient({ - account, - transport: http(), - chain, -}) - -export const walletClient = await (async () => { - if (privateKey) { - const account = privateKeyToAccount(`0x${privateKey}`) - console.log(`Wallet address ${account.address}`) - - const wallet = createWalletClient({ - account, - transport: http(), - chain, - }) - - if (endowment) { - await serverWalletClient.sendTransaction({ - to: account.address, - value: parseEther(endowment), - }) - console.log(`Endowed address ${account.address} with: ${endowment}`) - } - - return wallet.extend(publicActions) - } else { - return serverWalletClient.extend(publicActions) - } -})() - -/** - * Get one of the pre-built contracts - * @param name - the contract name - */ -export function getByteCode(name: string): Hex { - const bytecode = geth ? 
readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) - return `0x${Buffer.from(bytecode).toString('hex')}` -} - -export function assert(condition: any, message: string): asserts condition { - if (!condition) { - throw new Error(message) - } -} diff --git a/substrate/frame/revive/rpc/examples/js/src/main.ts b/substrate/frame/revive/rpc/examples/js/src/main.ts new file mode 100644 index 000000000000..88b72755aae9 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/main.ts @@ -0,0 +1,141 @@ +import { + AddressLike, + BrowserProvider, + Contract, + ContractFactory, + Eip1193Provider, + JsonRpcSigner, + parseEther, +} from "ethers"; + +declare global { + interface Window { + ethereum?: Eip1193Provider; + } +} + +function str_to_bytes(str: string): Uint8Array { + return new TextEncoder().encode(str); +} + +document.addEventListener("DOMContentLoaded", async () => { + if (typeof window.ethereum == "undefined") { + return console.log("MetaMask is not installed"); + } + + console.log("MetaMask is installed!"); + const provider = new BrowserProvider(window.ethereum); + + console.log("Getting signer..."); + let signer: JsonRpcSigner; + try { + signer = await provider.getSigner(); + console.log(`Signer: ${signer.address}`); + } catch (e) { + console.error("Failed to get signer", e); + return; + } + + console.log("Getting block number..."); + try { + const blockNumber = await provider.getBlockNumber(); + console.log(`Block number: ${blockNumber}`); + } catch (e) { + console.error("Failed to get block number", e); + return; + } + + const nonce = await signer.getNonce(); + console.log(`Nonce: ${nonce}`); + + document.getElementById("transferButton")?.addEventListener( + "click", + async () => { + const address = + (document.getElementById("transferInput") as HTMLInputElement).value; + await transfer(address); + }, + ); + + document.getElementById("deployButton")?.addEventListener( + "click", + async () => { + await deploy(); + }, + ); + 
document.getElementById("deployAndCallButton")?.addEventListener( + "click", + async () => { + const nonce = await signer.getNonce(); + console.log(`deploy with nonce: ${nonce}`); + + const address = await deploy(); + if (address) { + const nonce = await signer.getNonce(); + console.log(`call with nonce: ${nonce}`); + await call(address); + } + }, + ); + document.getElementById("callButton")?.addEventListener("click", async () => { + const address = + (document.getElementById("callInput") as HTMLInputElement).value; + await call(address); + }); + + async function deploy() { + console.log("Deploying contract..."); + + const bytecode = await fetch("rpc_demo.polkavm").then((response) => { + if (!response.ok) { + throw new Error("Network response was not ok"); + } + return response.arrayBuffer(); + }) + .then((arrayBuffer) => new Uint8Array(arrayBuffer)); + + const contractFactory = new ContractFactory( + [ + "constructor(bytes memory _data)", + ], + bytecode, + signer, + ); + + try { + const args = str_to_bytes("hello"); + const contract = await contractFactory.deploy(args); + await contract.waitForDeployment(); + const address = await contract.getAddress(); + console.log(`Contract deployed: ${address}`); + return address; + } catch (e) { + console.error("Failed to deploy contract", e); + return; + } + } + + async function call(address: string) { + const abi = ["function call(bytes data)"]; + const contract = new Contract(address, abi, signer); + const tx = await contract.call(str_to_bytes("world")); + + console.log("Transaction hash:", tx.hash); + } + + async function transfer(to: AddressLike) { + console.log(`transferring 1 DOT to ${to}...`); + try { + const tx = await signer.sendTransaction({ + to, + value: parseEther("1.0"), + }); + + const receipt = await tx.wait(); + console.log(`Transaction hash: ${receipt?.hash}`); + } catch (e) { + console.error("Failed to send transaction", e); + return; + } + } +}); diff --git 
a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts deleted file mode 100644 index 0040b0c78dc4..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts +++ /dev/null @@ -1,69 +0,0 @@ -import { assert, getByteCode, walletClient } from './lib.ts' -import { abi } from '../abi/piggyBank.ts' -import { parseEther } from 'viem' - -const hash = await walletClient.deployContract({ - abi, - bytecode: getByteCode('piggyBank'), -}) -const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) -const contractAddress = deployReceipt.contractAddress -console.log('Contract deployed:', contractAddress) -assert(contractAddress, 'Contract address should be set') - -// Deposit 10 WST -{ - const result = await walletClient.estimateContractGas({ - account: walletClient.account, - address: contractAddress, - abi, - functionName: 'deposit', - value: parseEther('10'), - }) - - console.log(`Gas estimate: ${result}`) - - const { request } = await walletClient.simulateContract({ - account: walletClient.account, - address: contractAddress, - abi, - functionName: 'deposit', - value: parseEther('10'), - }) - - request.nonce = 0 - const hash = await walletClient.writeContract(request) - - const receipt = await walletClient.waitForTransactionReceipt({ hash }) - console.log(`Deposit receipt: ${receipt.status}`) - if (process.env.STOP) { - process.exit(0) - } -} - -// Withdraw 5 WST -{ - const { request } = await walletClient.simulateContract({ - account: walletClient.account, - address: contractAddress, - abi, - functionName: 'withdraw', - args: [parseEther('5')], - }) - - const hash = await walletClient.writeContract(request) - const receipt = await walletClient.waitForTransactionReceipt({ hash }) - console.log(`Withdraw receipt: ${receipt.status}`) - - // Check remaining balance - const balance = await walletClient.readContract({ - address: contractAddress, - abi, - functionName: 'getDeposit', - 
}) - - console.log(`Get deposit: ${balance}`) - console.log( - `Get contract balance: ${await walletClient.getBalance({ address: contractAddress })}` - ) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/script.ts b/substrate/frame/revive/rpc/examples/js/src/script.ts new file mode 100644 index 000000000000..999312f0fd5b --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/script.ts @@ -0,0 +1,49 @@ +//! Run with bun run script.ts + +import { readFileSync } from "fs"; +import { Contract, ContractFactory, JsonRpcProvider } from "ethers"; + +const provider = new JsonRpcProvider("http://localhost:8545"); +const signer = await provider.getSigner(); +console.log( + `Signer address: ${await signer.getAddress()}, Nonce: ${await signer + .getNonce()}`, +); + +function str_to_bytes(str: string): Uint8Array { + return new TextEncoder().encode(str); +} + +// deploy +async function deploy() { + console.log(`Deploying Contract...`); + + const bytecode = readFileSync("../rpc_demo.polkavm"); + const contractFactory = new ContractFactory( + [ + "constructor(bytes memory _data)", + ], + bytecode, + signer, + ); + + const args = str_to_bytes("hello"); + console.log("Deploying contract with args:", args); + const contract = await contractFactory.deploy(args); + await contract.waitForDeployment(); + const address = await contract.getAddress(); + console.log(`Contract deployed: ${address}`); + return address; +} + +async function call(address: string) { + console.log(`Calling Contract at ${address}...`); + + const abi = ["function call(bytes data)"]; + const contract = new Contract(address, abi, signer); + const tx = await contract.call(str_to_bytes("world")); + console.log("Call transaction hash:", tx.hash); +} + +const address = await deploy(); +await call(address); diff --git a/substrate/frame/revive/rpc/examples/js/src/solc.d.ts b/substrate/frame/revive/rpc/examples/js/src/solc.d.ts deleted file mode 100644 index 813829f40b6d..000000000000 --- 
a/substrate/frame/revive/rpc/examples/js/src/solc.d.ts +++ /dev/null @@ -1,83 +0,0 @@ -declare module 'solc' { - // Basic types for input/output handling - export interface CompileInput { - language: string - sources: { - [fileName: string]: { - content: string - } - } - settings?: { - optimizer?: { - enabled: boolean - runs: number - } - outputSelection: { - [fileName: string]: { - [contractName: string]: string[] - } - } - } - } - - export interface CompileOutput { - errors?: Array<{ - component: string - errorCode: string - formattedMessage: string - message: string - severity: string - sourceLocation?: { - file: string - start: number - end: number - } - type: string - }> - sources?: { - [fileName: string]: { - id: number - ast: object - } - } - contracts?: { - [fileName: string]: { - [contractName: string]: { - abi: object[] - evm: { - bytecode: { - object: string - sourceMap: string - linkReferences: { - [fileName: string]: { - [libraryName: string]: Array<{ - start: number - length: number - }> - } - } - } - deployedBytecode: { - object: string - sourceMap: string - linkReferences: { - [fileName: string]: { - [libraryName: string]: Array<{ - start: number - length: number - }> - } - } - } - } - } - } - } - } - - // Main exported functions - export function compile( - input: string | CompileInput, - options?: { import: (path: string) => { contents: string } } - ): string -} diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts deleted file mode 100644 index aef9a487b0c0..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/transfer.ts +++ /dev/null @@ -1,18 +0,0 @@ -import { parseEther } from 'viem' -import { walletClient } from './lib.ts' - -const recipient = '0x75E480dB528101a381Ce68544611C169Ad7EB342' -try { - console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) - console.log(`Recipient balance: ${await walletClient.getBalance({ address: 
recipient })}`) - - await walletClient.sendTransaction({ - to: recipient, - value: parseEther('1.0'), - }) - console.log(`Sent: ${parseEther('1.0')}`) - console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) - console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) -} catch (err) { - console.error(err) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/web.ts b/substrate/frame/revive/rpc/examples/js/src/web.ts deleted file mode 100644 index ee7c8ed034da..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/web.ts +++ /dev/null @@ -1,129 +0,0 @@ -import { - AddressLike, - BrowserProvider, - Contract, - ContractFactory, - Eip1193Provider, - JsonRpcSigner, - parseEther, -} from 'ethers' - -declare global { - interface Window { - ethereum?: Eip1193Provider - } -} - -function str_to_bytes(str: string): Uint8Array { - return new TextEncoder().encode(str) -} - -document.addEventListener('DOMContentLoaded', async () => { - if (typeof window.ethereum == 'undefined') { - return console.log('MetaMask is not installed') - } - - console.log('MetaMask is installed!') - const provider = new BrowserProvider(window.ethereum) - - console.log('Getting signer...') - let signer: JsonRpcSigner - try { - signer = await provider.getSigner() - console.log(`Signer: ${signer.address}`) - } catch (e) { - console.error('Failed to get signer', e) - return - } - - console.log('Getting block number...') - try { - const blockNumber = await provider.getBlockNumber() - console.log(`Block number: ${blockNumber}`) - } catch (e) { - console.error('Failed to get block number', e) - return - } - - const nonce = await signer.getNonce() - console.log(`Nonce: ${nonce}`) - - document.getElementById('transferButton')?.addEventListener('click', async () => { - const address = (document.getElementById('transferInput') as HTMLInputElement).value - await transfer(address) - }) - - 
document.getElementById('deployButton')?.addEventListener('click', async () => { - await deploy() - }) - document.getElementById('deployAndCallButton')?.addEventListener('click', async () => { - const nonce = await signer.getNonce() - console.log(`deploy with nonce: ${nonce}`) - - const address = await deploy() - if (address) { - const nonce = await signer.getNonce() - console.log(`call with nonce: ${nonce}`) - await call(address) - } - }) - document.getElementById('callButton')?.addEventListener('click', async () => { - const address = (document.getElementById('callInput') as HTMLInputElement).value - await call(address) - }) - - async function deploy() { - console.log('Deploying contract...') - - const bytecode = await fetch('rpc_demo.polkavm') - .then((response) => { - if (!response.ok) { - throw new Error('Network response was not ok') - } - return response.arrayBuffer() - }) - .then((arrayBuffer) => new Uint8Array(arrayBuffer)) - - const contractFactory = new ContractFactory( - ['constructor(bytes memory _data)'], - bytecode, - signer - ) - - try { - const args = str_to_bytes('hello') - const contract = await contractFactory.deploy(args) - await contract.waitForDeployment() - const address = await contract.getAddress() - console.log(`Contract deployed: ${address}`) - return address - } catch (e) { - console.error('Failed to deploy contract', e) - return - } - } - - async function call(address: string) { - const abi = ['function call(bytes data)'] - const contract = new Contract(address, abi, signer) - const tx = await contract.call(str_to_bytes('world')) - - console.log('Transaction hash:', tx.hash) - } - - async function transfer(to: AddressLike) { - console.log(`transferring 1 DOT to ${to}...`) - try { - const tx = await signer.sendTransaction({ - to, - value: parseEther('1.0'), - }) - - const receipt = await tx.wait() - console.log(`Transaction hash: ${receipt?.hash}`) - } catch (e) { - console.error('Failed to send transaction', e) - return - } - } -}) 
diff --git a/substrate/frame/revive/rpc/examples/js/tsconfig.json b/substrate/frame/revive/rpc/examples/js/tsconfig.json index 55cb8379e886..0511b9f0e041 100644 --- a/substrate/frame/revive/rpc/examples/js/tsconfig.json +++ b/substrate/frame/revive/rpc/examples/js/tsconfig.json @@ -1,23 +1,23 @@ { - "compilerOptions": { - "target": "ES2020", - "useDefineForClassFields": true, - "module": "ESNext", - "lib": ["ES2020", "DOM", "DOM.Iterable"], - "skipLibCheck": true, + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "module": "ESNext", + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "skipLibCheck": true, - /* Bundler mode */ - "moduleResolution": "bundler", - "allowImportingTsExtensions": true, - "isolatedModules": true, - "moduleDetection": "force", - "noEmit": true, + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, - /* Linting */ - "strict": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "noFallthroughCasesInSwitch": true - }, - "include": ["src"] + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"] } diff --git a/substrate/frame/revive/rpc/examples/rust/deploy.rs b/substrate/frame/revive/rpc/examples/rust/deploy.rs index b74d7ea18d41..f2be5d233f6d 100644 --- a/substrate/frame/revive/rpc/examples/rust/deploy.rs +++ b/substrate/frame/revive/rpc/examples/rust/deploy.rs @@ -17,10 +17,10 @@ use jsonrpsee::http_client::HttpClientBuilder; use pallet_revive::{ create1, - evm::{Account, BlockTag, ReceiptInfo, U256}, + evm::{Account, BlockTag, Bytes, ReceiptInfo, U256}, }; use pallet_revive_eth_rpc::{ - example::{wait_for_receipt, TransactionBuilder}, + example::{send_transaction, wait_for_receipt}, EthRpcClient, }; @@ -41,11 +41,9 @@ async fn main() -> anyhow::Result<()> { println!("\n\n=== Deploying contract 
===\n\n"); let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?; - let hash = TransactionBuilder::default() - .value(5_000_000_000_000u128.into()) - .input(input) - .send(&client) - .await?; + let hash = + send_transaction(&account, &client, 5_000_000_000_000u128.into(), input.into(), None) + .await?; println!("Deploy Tx hash: {hash:?}"); let ReceiptInfo { block_number, gas_used, contract_address, .. } = @@ -62,11 +60,9 @@ async fn main() -> anyhow::Result<()> { println!("- Contract balance: {balance:?}"); println!("\n\n=== Calling contract ===\n\n"); - let hash = TransactionBuilder::default() - .value(U256::from(1_000_000u32)) - .to(contract_address) - .send(&client) - .await?; + let hash = + send_transaction(&account, &client, U256::zero(), Bytes::default(), Some(contract_address)) + .await?; println!("Contract call tx hash: {hash:?}"); let ReceiptInfo { block_number, gas_used, to, .. } = wait_for_receipt(&client, hash).await?; diff --git a/substrate/frame/revive/rpc/examples/rust/transfer.rs b/substrate/frame/revive/rpc/examples/rust/transfer.rs index 1d67a2dba28f..b99d48a2f78e 100644 --- a/substrate/frame/revive/rpc/examples/rust/transfer.rs +++ b/substrate/frame/revive/rpc/examples/rust/transfer.rs @@ -15,24 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
use jsonrpsee::http_client::HttpClientBuilder; -use pallet_revive::evm::{Account, BlockTag, ReceiptInfo}; +use pallet_revive::evm::{Account, BlockTag, Bytes, ReceiptInfo}; use pallet_revive_eth_rpc::{ - example::{wait_for_receipt, TransactionBuilder}, + example::{send_transaction, wait_for_receipt}, EthRpcClient, }; #[tokio::main] async fn main() -> anyhow::Result<()> { + let alith = Account::default(); let client = HttpClientBuilder::default().build("http://localhost:8545")?; - let alith = Account::default(); - let alith_address = alith.address(); let ethan = Account::from(subxt_signer::eth::dev::ethan()); let value = 1_000_000_000_000_000_000_000u128.into(); let print_balance = || async { - let balance = client.get_balance(alith_address, BlockTag::Latest.into()).await?; - println!("Alith {:?} balance: {balance:?}", alith_address); + let balance = client.get_balance(alith.address(), BlockTag::Latest.into()).await?; + println!("Alith {:?} balance: {balance:?}", alith.address()); let balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; println!("ethan {:?} balance: {balance:?}", ethan.address()); anyhow::Result::<()>::Ok(()) @@ -41,12 +40,8 @@ async fn main() -> anyhow::Result<()> { print_balance().await?; println!("\n\n=== Transferring ===\n\n"); - let hash = TransactionBuilder::default() - .signer(alith) - .value(value) - .to(ethan.address()) - .send(&client) - .await?; + let hash = + send_transaction(&alith, &client, value, Bytes::default(), Some(ethan.address())).await?; println!("Transaction hash: {hash:?}"); let ReceiptInfo { block_number, gas_used, status, .. 
} = diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata index 64b1f2014dd06815fcea6a87bc96306eb00eda8b..305d079f9bd86786d1cd00fadac152eb856b26ee 100644 GIT binary patch delta 19715 zcmb`v3s_Xu7C3(PnKOHzGY@7Ekm04EprWAQ6HHVTR8)MUVWNyM>Ll-niiT!MS29gH z;!2a0TVA#FCXt=2s4Tr{MMgJE^O0CqT9}$tl-7TpGlLFszyJ69ecz97zOx@^?X~w_ zd%gDB`%w9OpZ`4Ehvqs;?3Ri;+J%zOoR&w^0pv%yfTqj&bTov{qExmpqwW@Y z9^S5`0rF55Nr;@urV_n;ko6*d@)s{d zhQ4Hcz;9XOKf!v#;yFDY>pl?hy({HU=nIV}GhpGWG?7;U``6M~bA?9=Sd zY?FLG-d683=maGqX5%a-;ptgR9HGS}_N9)Z;@b9RW}WUL z$%g5Zn2&sX>{ar;oRmCT{yAxymaD<%rQ{fV`izt1exrQk!{f5ZMLA^rX!4CbfBbkk zc)U$+7;h%u!@1?uCVxGCF8Q&ZOnFyNu4@T@%LIv)G!QNtcqV^4vj_RDUYa#UZ~cwq z_}koVjuS-A#t{PZ1NlHX_?lOJRr-qrS193u(L|D0Kk%lXL=e_YIQ${BH;LiAi3#d@ z@qTjlyq>%tfs@rVP@X%%RR7Yv2|8kxe_JpQ#(vKGNsbi-&LYPGdkIWGPP*1_%KVey zf^S@y>~AG8vxW~(EG~AGOmgOBbHrL?FL8v(R~AMi0{^T5k)0q+`8nAkgoKZCu5e_h z#SV<)h$+vpJVegViXkELuB<_P7=e#!X;A(3thWRbQUB7CWthmla=Vd8cyToCt&1cg zkbmlZ&A~HD^Gck#4i5jK>o@22;Q3er-uLqXa$|lp86y9Y{|t$jpak{_?yI6}J46CCD}@^4Y=|Yce*a zHZDcQluEc%IjCrcmnS$kU0zibOVZ>6MJ=W@1k+h@kRq2Z|EoBhOsoH}xI!m8SKN=R z`R`-mkuG?c+Rg)~{QaK}3g28IE(xS1Q)=xdiMV)4=-5$8vRiA%RhK zynnsr@g;;WC6Mq14U)HPI7rIm0Z;ZK<#Nswv+w4wKC$+0Uf!5~H}AV?#ohcXn^xY< z&wlciyZM2e?S?WOh9Fw4T)AAcIRbUfdz%lD3c0LmF+Q8Cmf&;ZQw8|kl@%<1@suCx zp_Zqv^BV~qyh#1!U*vtHO5U}l7pbm)f6G`xs$lh(e3U$FtD`-8Vr!hP%FU3n4HBek z>{2gq+X%dKmAA>u;60&+kOF9K<%8w)r+0QJ4U#8qEALPnf+d{u?L|;QYtQzQU;ps=zgKpbpZ;h)R@R3#Zs!pVa))$6WdbXQ z%~vIalP0yUe&Q!>dIW>{CHmm{tj{+H*vC&_f*YN@tFumrbMdY;01lHa^kH1Ti#XJ3z>|V6~|Zl z7@}r}D^^egCr}zW{|yvB*U%+M8V?ynWTr_B{9lo z2U(!Ly1t0iVe-$#v)J!aVadDe6WX;H8izEM8Pd zfk3=r`kQ188KN}3Ng`BCzrICs_`9}EDLX=9gVgc7c#gF}GMg)0#0l!;aDMuDR!iQwQKbP_QNrp0YId33XoV$WwiOH!I{9kHP#-GNQ z|6a*^=}4N=P|p7=TqAu~Sou$EP_0z_lYghXrW$)H&Ug89dM)1rmK^85@Ga{+kTRHg zf}gK0_RA;uX@pdO?n8bjsZgH#kk|2;?rG#75nW0!TAA}X|4O@ZRN_wa$-L{$Anf_& zFZoS6EGAyy{jr#SfuCgZt?Jxk6})zppXD00((?yiQaL{MHm-pK4z}{sl&4$x+w5+g 
zf^`qmyG1lWUA@9pXP_Y8nOAaG<0?%R5D^x3Jp*#f5XcIn<{m^M71yXK4O{{uXy ztV-K!I@zB`S()}`sPgYmbjOLS5T$ksCVMfgZ{$zw2J$|Ng4G^7I{DD{+|<1+B{t)Qop6jq->f7zDLVSYwc~ zR}_Bq$3Ch)q}%4n!`rIwdo=3D)WPv1~V_^=M4gM#Z}+%PoYfZ4*kD`Q-O%~>^s6b zG*?RI2|?6j>ik$JQM$u$+48TV=ov;Su7JAJJyd$#FW09m*#+g>?o* zn|4QNqL}s;^gJ>tZ0e#fRd!nRXcdwbETC_rIdRH&KKfU?l3;kYuYS8Ojm&~>CVd=y z)KA|fG}n>4&`}h+#9kaiigWD6OF~QXLl-(i?F9wcS#}f{qx2zu=p~01*o#U+^B0Ax zc@0uo6QzIL3x|!N`raz~fF1f2UAJTI#fo6}7Au0`VjUw~B!jTGh@VouQ(uSD z3ge&G=lT1QzM+`~_OhIOdv@mVP<2;1WAe0Uc>j5Q7@h1`Y|koFzJFfNBXL#5KLTmo zXODgyuCKcY`(HqkmqXQly{{s?sPF1FGzbaz@7KqXas^*KrP_+{m-Sy6&>RU3`XFM- zPRt9nXJx6n3aM*{$hY*f12Pjsm*tl@)QS>Z5Jz5Uks~*MnIl`S$`4ag-q!yt{*5mH z?QmD6@VNffE=V>{4@bem5oA0l`c7Yj6uSJK{s9Y0XX3B=L@yliqk7sSw49I;QwvI* z`FSJ8F0tq3Idagxh87e#^NXA%Ww;ra&9Y{izs37`<$_zpX zorq|rhR|wtF)w$gW+SFQ_oemf>pfAl0TV|weF~G`v_#p^pWe?4=%ly6n>+PE3Li`N zT3kEJM@Sj;Po$aQ*~JA5id6&vOy*b|OwG5`xEI5)EZz zFnkgXyT;J|aj_z2BDTEzqFj4UtVo1e9Er*=aNyn;33j(CJlCF=Z7<0$Ds#bbu|UVt zS~3Loj-`Er<3%!SnxiPmSzKT*$yzcN<*j%`=^%`MaPe9<5S&B@kp!5QL`RTB*qB74 z`wi|mSQ75cEy!7*ZXv@&((xs`Ki$t60 z$jV2;7M0DOF>YFsBgwhgQC!05V8cY(9i~m9^L)_A$ILETG}E~_&t6hmgzi@vygiA| zK`e+f7zRzIX=JS{zk4#Z|BnV05Rgjke5GqTy*9chU*$2MjoRd6tX!2kF)URppQh4R zh;f_x!o=A^xf-QvDorPRtp_2y?snrk;DKwmSk+>KnbYYcuYDp3p5d?;=jRo3+&=V#F#q_UV?`k^`WqM zF6}8v683pV4|2Ei9{e_#xZ^N}8Yl01E2FPJ$WHDdrgHlHrw10+>0 zW59m__2om{?-$TWbC`syXH!i@QE?83Zi`hqbTCeGI&4`?BMfQkKt+trrNeuR z>98(1i1t-;#{j{!gvJ>&vQnjK?LjCiolRDKcy%QzG0e%nCUWRfF>Zpxd3+Eoh=XjB%&rWjj{jOe)mZ^GKz#b}3DiLN=;6t0WG8YA204u3F1&lj68-@Kp(lQjKCP zrCF$VT%;Vd+r5JOJXUbG+Q?)bfxqx{7zr=!VX zC|r*7IjmOwgPen`6)67=P_=^kV4}gjwSrFMk4P8>_lK}DI-VQ@XBnNsH%YK|66?mF zlAv}H>j}5Z=={G`V)%sPj=9F(>x6skjUHn^p$*h}k!w^(j-@77pIuhb(TG^)UQ3gVSG2cWi-caolWXZb{)PlCYiS-X z)x1Zj4U^JGXm7q%LfsS$AI@PW<>XBEHc#4I?eDnSujeqt+Yv(28*qq{fs*LfQ}!vD%wl{8c!ep@%uI4@7~2N<;IKe|s;CLwTZ6WvO}V8fF% zn?%6%Cuwi*Mhv^VMPLfDO@!f|_3mnRd%%*1JVhGhkq-nYPj04P^XS?X%e0b@)f&Y> zC*d0cW46$~=x66{p~;xkZ=vNR9%8F$FeX!~=`3_ro~uTXMCIdZIz!#+gl(fe37?{& 
z9D{;dxt*qg?#C*G8jF77;GHxU{e?w4X%xB&@=g@n3V35D z9Vt{A@R;Z=d=33~Q7hR9qjup`svvKd=BHf41owd)O0|}?VYWie_Jct?sV_vM5yM96 zI}!tLVZB~GIjDx6wX}x;!(r)AI_?mhe1#65f=4GJ z!5!Wr2P&bW1VTcmy7L?fFTxm1k!oX*KQ(1F!(D+(a#$mO8j0f?Tnl04ju;?uEfVz@ ztX8NE>JK3Uoq)DOG@n1A4J#(L(cl_UlgEe}wf5F40@tLD4U4C=8rvBIcczngoOfYL zXm(?|;(-b4JY3GphE6W$WkifB=dwD$M-5lh?wSo;i-vLxf5QM%U!xE5ty*e>|Gq}Y z+1d;wXOgT)an(nB>HF4H914oqS57T6Zfr_i|(CI{i5l3ib zq-69Q7jA9-9>OvkwQ-qY(-Arx!>A{Y&>_SR{yIWKy#0)%&op~kerZWD+6#{JgQ`Xv z+bh86q9jJD?i%y}M1wZa#)V*p$5NOEi+ZC;WImXD@Gc#PTOIHB(8q{`r1xm96s>|r z7;)3``g?S$>t!hPJxaUc1}5<+x^zPz`zW1kh}V!K2PeSmM{$&i;C+scg{-5r3rsvl z=bKVg029XzF>TVYVa3UIyI^=pYUK7!^Sp6gJYX_aq@r z9c)|&QWS=rPbqcj{ISl`tZjs8> z-b#(A2EO942&4xB+B;x>>kX*?>AcML2H=6y-K3`Z?~*O5_o zgNV4Q!vJn~ViPhx< z4-BeNmr3Nd8M)dH-65jCv!x1(9q!gvn%`$cUp!DLI7iEQ;{gpzoe^CKji7#q+yp)B zLC`}m@+&$FXT9tzI#|F^ak%Wg)u*dxKnE3xO20!!4q7hIOc-{N=EKRas6oMVy9vCZ z$u(MmI|cmLbhdiJcE5T8=LdyfBSGun#jkPyaK@;e(J3uo(`131he6-de+$hv zQ6HXfH7Zrt5rt2ikwj_d6xQYIfUQN2f)Su}yMhWEyoi&s)v#Zv4i_V7YH_;zkc?(@HKiGSV zhGWur3$2SEe0Pfu;r&di12BdJDE3x5o01S$%7U8W)GKtyZ2iHmn(>2}C%hY~yxC8&t-q%zS2F(2ss zq3mO7RPIn#i!o|fv#$S3Po=ot`K6j%I&8WJ%2enlu{qLo6?3YIOM?v(n}WXPmlErb ziDY0YBpqfNSegwZ-%%J@DJ;cv0%uMRM||<9-%+FqS)6}{HiHI&j=ss%rvT%qj*6O>I|XxKl?fv^B8eNDk7LG-Q-0oz{2UB!RJnn6`LkHQ+Fkh8pY`zH zW(rR%!1E70@hB!WCX8BXq=2fgX+W&aPRlNeFFs*F<3walzvRcH`8w2fxq6;ZIdM7&AZM?kP8b zE#d3jg{uL~Kj@Gt$`zZN?96qR;2n#*Lp=n+fovdu2=BS%;+{D>kmUp&Rxw(U-skb) zlWWlMpCLy~+z~glSswM546u&UBP$!Tb~L6Tk{JQiW(a_0j{^ z*gm)*wp!H=&R{l!Z*o_v3x$D@3&E@_f684ag|I~aj0u{og0GSp!h(&$d6PR>v7--r zT4;8cr)xEd z@}DjK@Z12FV{X%?Yv#Rq?v6>m(-Nj6#IOtY@3{7Gb_KaWw7`Z(%g3Ywx8Ko_F zYTjZz-CHbhL!f5@dlh=+L}BnoTAt;G2jxBi;kmtVA|` zdV<;0JBT-TvX=2kEaaQS4sgkx;gVf&c!F@3KKe0i%w)9Llik}oj}s5{HB&lZOzn&@ z)rC<_P-o+0r)zxB=YQ$mGz8+)%^dy^xSC{`xeSky2xD>XVbE_Zd%LIIOnk?d7A?a| zR8hIkyamOj1?qm_UNweL+UIcG<}i~`?J`)((7_xDU9kW+6?mV_u{D`>?b6{TMj{+$ zxi%}P+r0)TbWSrdIk7EfFfwoAauq~=Qim!yHif-xEY$|gVZgr()=Xxh;588qLpfwj zWXZVB&rM_lv828EWg;7mo4H|=*aURI9-hRyqYtuc5(~#lRX}=IxCNxrLqIAt{MI3q 
ztHd0S#f>VqAc&jH#$kKRFT{Sd4)|2|=-)7J(=bQWxG~pyV6M?HSED(oRWa9?;o@W# z0z2Oq7`*lfwJ5Wuuw^>3Px){Bf4Ys*-${BNK8*m0bS-|{7b=Se2Z%4*58HR(QZzda$%B(1p#p;^d z-@TQ|#`e5yzI*nBR(v5^aCCyBvhWycv15UH|L}?#3L^Esurxv+472RaTh+pNz!(CL z+HpxP!v}U2fok)*o%PnWwBsJQkZpkHGFdR3S%_Lmn~ooZWU;-v)^?bSS zFvKil-T$V21E99}|C#pX1GN5Za8L8PI{m*;zmbr)ob`0;U_Qi4i+}r-xn0iyk8_PM z<*((cc8GwJt5|F(%8hk~J#Q(;MQUV4HNi!@b5<@^Lo`M=#0#zSDl}i$ztu~w@Sgk- zdy*I8y-f4quOZYQ64tQcLc-nRtVq^Hd43IB#LzyBT#u_C_79YfWEl4kWQY`a{2wfm zPj-#d_qIz!e!FLNQ(SL4huKmUL>A8LrhAES{WrZItbCj~aijaq<1D3LnwO`kN%!hx zYSM9LIKlMx#Kb>qR_dBknb*BJm0{<6E9J^vFn(*jTrd?LFl!-f z8`A2@Ei6u|P~q2lag}PFKYX`Ev&*__wi8vT26nib1;R58yam3iW)>JqnHQYhrnllT zfA_6y2{Q2dtt>bQ*EoD^evw0qmvf}gB4<%?$$~7bsfM?=vJuvf09=&|dl;ZQRSkIY zAe6_=v)1D>U}OBZd6Cg$^YLKSq59bE8@g>Sh|9nxz(1>Tz)4TDd}FP~OV|VMhBXYI zJ&nG?J~cl8HTLvvsO+>P5X0*Tt2g4tSOYw;jqTtMxoFkpu$xv5cWDLPwjn?MYr94i zy!OxyE@Ix$pc3t{7k9)9u9>M#33-O~GvIb3Wj9;JpYVb^LufDM$KB{N@r~{!Zi4hZ zs1~)j>nNP?#XT(WUUcOY2z%LJ#1Oxiokd3T+sESgGw!D6;i-LWA>Zr;Z{LqwQPT^| zNcO{xTH2;~zsROp;HB5myXphSU)S8d6>qTDNnaTBCX0n0+i8!W%PMukuXwe1-SBGl zYV#r%y+yKkTevG;Ku+TPyc$>x-{LN$H?V&E4R>K@1H*W`yU^Ugdhl)TLbtbAFXJ6A zVoENubKD&q>J;LuWWB|9SP9vB5~r%iXCT;5vQR;?i0f4Wwn5EF)^{)->iq2pDin|1 zaRcbgHe1L9x0}^|uTVQIGs7Pz(VTcg*e7hH(a%DxsrlKs>+(}8Lg8_I@BtQW5b(|? 
zxZEM|H6~rdEUtsM%xE^y(FN6+4Kc|4O0q#e8D0j%Q~z_VdwQIbRjB-l+&yj zhthBw0|9Fx{0z%>mBzuzGmPmfL;}&NG$68~1IEhE7%N>EDa(mHynhKp zwpDQU5}QYA;H8T!!BS-*!VEl3bu2@I7S}@ac^1;Ys>A%N)%h=R^F_7C^p|L}uU2u# zVeC*j%fc`@bC%tQ*tHJNHM5kM+74sg-Fd9LU1L=fej2yW)%1gNh~|LDxa&HMyY7FD zyRO5y4|$IJP^WPpa^aqM0X@2ED7(O-BM*0I-{9H4p+kEh=aH+EuAn^;gT zLXlv{AoDBcD;{&r4bLys1=#)-n~CQaN1D-<9CeX(!K)D3Vw}=v()=$ezI0>e8YY>wyghsD0vO z{o#kNQ584BuV15AbOm}{LX+NNf&0EgxxS&H>YtQcvSe(2p1Qd$ap?!4{oosS99Cjl zf*3A3j}||`k}9^O%%yK%a2$+RhLF_y%p?VF&{}i6GN%QVr(<&a-?}zXuE>5 zlc4S@F5B3vEEkVaF+UMi^X;o_V5B!Q^UoFkwN6F$;i_xg_*G%181end~95aL=` zME~dxlo*VJj78L%632~iJ0)XXxMSh^HRcODTiD(;Lpn5!@oX5=sbPEv-~_8DbOPf3 zTR&=*ASPNju=mLHei0a=DRZbv(LGCaoZ zz&tXITE*MdiyYgoBbPXU-C%RpI1$M`_Q>tLa=EV7YQh&v z0#k3X=_n$#w^%YCV36%7NPpf!{SyhpS`ld?EN;cUU@~lLWrI-jH@329MEXZ7j$$A5 z`32t*nEMMtro!WDk^!%)NiJOd1=+X)f_}xM5+?tOmHXhKU$J>3yr3p$;I#Vg3P``9 zdb(kRVZWg{4FLOZD6}DJ66d9T?&&+m;aG$pdq%=dgv23rZ)1JYW=>R-3Mg$u8k~XJ zHuOxJP4XyH7~F1S0T=@X|KHgl9z&f6O+m2ucNQU-&15g!$iQPSj9@Z&;dj=Z4>8LF z%zkk3ceaj?Hp?HF1L5I6(D%=Unm=&Rh0yp18_Ms-(34jPEdLod88Nq+7fxo>Z8i+! 
zEgCgJxy`INv*TDt%vITow%=w`26Mm z8#DeN0UR;L{IUniB{9^{US9n>?2*K19hTt>NgR(s((VTFAhOpd25|x2;)pSd%hg-9 zdre{s^5s^un2vmT-7HQb>B4OOe8pB$rmXZ6 zAI48f;17RsC9M$g@FIGvO$-LGiL3BBlTU#7jDXj9UkVYeI%K_fLd8LQOcl_6 zu8RW z*1BYIEg{F1igDsPLQ)jN1o0Rnsmdo)#jl7iO-fW=o-XF9xpbw+OmQc#)g4U}d+T%= zl3lrWzv!pFJppvC*ov9s%Aa$QDA>jG^TZTFK2fe^h=DvQRisSOs)Ck7uwC59(j=R} z33dpxive)LF3v>fF=U}QR9`4nNDo(siJ{7hg(7~JeS!#Fu<}usSU`~E8;is#7F3sb zJKM3$nO|C*Q?>%?9AY;mZ;^PP0Y&qL0i0M@DHdr}Gd`k4iFn}0}9Atk= zEYuUip_=;QDKP|Dto11|9wXlaWaNu7rA`*xc$B#pw~BpvJ%MXf@>i~H6+b4nNJAGc z6fY>*i%W~j$laU`{(KZKyl>wo#*5JgT;Yd8r$Nk0(FU7;5cRNUyZE3!R;`C)&xn;M zg;RHk@p=-CcfjDx4si>zXkLxDh&TB`(^d)hvCoRls3Ow{7bAwl{0dWsF=gZ;Lw>_ zOB~syIgVW5_ad+STZz~!USW6(%5qTr7MbLigW@6-(2P29ls+2!s!?j|#N|A4OTa7Q zbc9>_ins)?TH+xo{)u=9|B4s`=0jpC9``(ONc?g#0{Jb8TJ`f+rwV(GPid`)1{}5F~yfFbT zeJH9s;n~rL#Jtn%PH9jO4Y|Aeh-rhN1Mbx zIMJ3SaXw;7SEhU_8c~Rqx&IXv0ZnU{(>O3Y3^|R|al#{~Q3Dsk@262PbD`{vc%M|L z4mOp9rohNAk(n=?5l86CR7LTnI2UQQ;!E)WYL;GSQJ7HiK5-OW;zdk0o=489 zQ1UN`b9DN(xQvzXVYAo^b;8YN)TY%i@RAsBZ7}S{J~$GhCzS}Z4^~|gyQvFzLiB+H zm&7_;Adzyieb--7qY{$`NTH|?atGj7?M_%SR4RjqVx%s} zbWg@eRjiEUnpDlSW0*7q=EX{W_~L;$sXJ7~N;c%XQmBfPdNKTdjxRHn!`pGvK#d!{ z21;wl5oOarsi!Wi!qlI$PFYk^Otj~SKKJe~Xe(9e(fwvu&yT^HVNzEVrj79QFewMe zQGIV5qvNDNC8I0TZGzMX9rW=Nq<64WF-3X^Z|qd0NKP+wWqz6?5lVc~rIiB0goi_2pSx+F z-sP9rb3(J0ICHW?wNG2|b3k}&z7%FWcn=D0%$FX3_ZCP#IL`<;vq0)g=0R1a#dhnQG6QuWlFu2*$XB70xudWvhebEEQDkumsX>)vS&+|&`bNMLRv+Xw--rUcyt5L zRZ8ppa?Qgyj6LSsiXxy-Ej3{d)@^bBeM@1+uc!?hdsI%96xbLWgo`upTG!riuN-K2@<|8nmNV?4GB=jU5_#Yg$qT+}x zmKM=tW}L=xwZcz%xmbFRkVa*8sdPe*YaO~u+Jlzj_$p}?f5WUKKZM-Fx0)4uxwKG6 z5|pD4OG!Fh25Fu2jCwTIEn6Cm^A3S4>)PjS5!^FI5w|}s4MSDA?Qv-}jqt*5cfi_z zh`p`hDcQ5~-6s&DULOgwHb{9UJZi$RW}r8)!~u1WOF?jDgR~Qs(nh9wJOkmaC!{W3 zv51EwyD6ze0uF%-PvGR^73)T6E)EcuZk8ru^6F+O9R1U?o27rKmLzM76pN=sB`EhH z@X8kH5nSL5CB9lh-@yUJ+odHa<>$6b!@^W`F2sZmPIZn6)joahA2r!twAe9Q{S11b zdW~=JGt!#LWf+a&xES^1vlQ?AX5%$$WD#Ojy-iL#5N)p>c&~o6C-|)8`71Q8Istz@ zgMNlnY26`FLJF1JHBzDaRj-m)E1^f8t9U;rp&>4W5qqVDs6}?}#o?BNxDWqzP$k6e 
zlcu6kTE7n$sLHDvTJ}kgqkp>M1!)R;9Phs%*>M3PUqp7?4ZB~I9zb>?`=zDGF1h<9 z{G<^s?nl8o1icPO-{JzB|1EhDbR>dbl6s?wOnOP0i~q!7=S$cnssyo4s%IF^d-F9Z zLOn9svQ==h>m} zN?~g*dxcLJyTpNCrRL|&brj{#)NW5-@mfDxtDRZk$eNAUrZK?L;$@A~>SnEQ*WB>3 z#%ML;9eIvoXK}07U>8KHdbc76Kf7%68r-KHdzOP()<x`f2dayOM8| zWLcl2y-3CX{)8VO+9%+byxx{_5=kN%Myc$sSuT;kUEP$LCE!h5LjB4F8jQa#26 z`W}^%P!HrEMgBhlGmc3g=t3;9(EUHsNnN5P3~u~Kde9JHv8uQeEz`mAzSQEIW)VwP zgjo74R6ojHfcudpgVU8o$EC5n#cl~N##@n&T#Qa1V-9%j1L->=DZ4(D-X*$1OQ~|b zSqkFe;ghJ)y>UtYye(}f%Csg4zo!jX;4^6_;f0T&?lZ}6>*rEe=KfAuIsUoyn4r9R zR>FTxP!3DaOKXI+7U3wwT#$N#_XU*mwGeed+KjFbq8z8_zLKu;x=L4v`8=spGQW|Y zASmiBm!*TqanF7$?X&E2O)SoeTa}Wy1F-xDX(H_UPD(^S_0o6JqiEL_eUGMCXA>^M z#VgV<=yz3m3JImQwx0KFU5sB#m>AGX3fTr6|3P5$b>JgLO^HboWW3wuI_51;f5;51!ltn9e!2) Mi4{B7t+jIhA1+6QhyVZp delta 23445 zcmch93tUvy_W#*u&YTC#z^H(rpo33PP*5>Ye4wIIqTs9W1tW|)2@K8*DjFfB6`A>% zaD`@R_gZ?((#THUvhv<6EvYQ6tSs$j>CH!${>ondzx&Lfklp{|_xXQ*eE7^hXP>>- z-g~XJ*IIk+wf7s#66*e%AeT9Pj_`xKZuc`f6cmEJInWg&Yt4S=)|FD|Mrug4d zd$O7S@72D{>F=WTWefZh@muM?OG{x@{zm++@PDQCV5|J0A<1m5KLfv;{BuK6*jE3; zAxUhze}70Xw!{Bj$jxjQe$#FJh6)Kn>^Qg2=k!!b!fyZFp}p8%|1=9aqXnV*FIgZ=W!r#9F-VtB3v;MSE!~I{4%rl-g;rD}4DfpGL{p^JQ;p|HGl|O9s zaCX{1Ve}aMhK+9Te`d6mouxdl9PK|cdRpVfar@1UDP#7T#h)y+;BL8t|GkNc?AOMh zCyqC_{Z$Y|#svHek}58>6=i&;N$64s`+1Q^IY4jB`QH*2MS`8m>AZw zjxfbvvLKZe_}4EuXDL82lT8o=Rha2t?dfK@Eh(qOS?+WBswO!p{|>2rAnqV^rr%MEH2y5|&nD`$Ic%u?CEpTLrUD%!PPjvlQ_FK3n89mU1& z%5q;%uN|e5T>VY_CIzOY>d#%+`xZ@#$E=_0 zT8MeZ!CVLG@Os_FE|9Rq&db{uVLBGibCl0T#_TG)%g5WTaC(|suJWfZEMOu2bqhDK zP=CszCnbw?M;7h#i0%C67o|&9X?gH%$l{DP4#twEc%2>&I&<<+AEkK1i-Rs+QkSu8qIVlBGA~w)JmXvi64DcK%^YZ)_<7xl#VdmUe0P$38?#rT(m? 
z5&qLl6VQ`3cZ?LBjEY_Z_qp#FWnwO>C}vTOZ`IU^tgLbJT{D^L;_M&@W&S;PFJ}uH zhc3U*#3~!#ymuKBs~DYqNsRH&u6vGE`>#IGgVp*o@1J_@`S17FUVAQHS#a(7tCfqd zJuiP?$+hPmtDe91EI#N6sYcV1YO@Ko{;~(Viz^s8K7d%*@!&>Q=g(O^7r(n!&%GFDIVN5mw* z@{qInvEiX~u|7DaD-OBUQJ` z>F579jbmHMs_W9czkg{Or;DENpk9`9&LJd;C#* zZVuUFj9FEMy^Xi;xkF_88ozm|fFa4+dzy*+82#p99sJp^MvMCy?L1;?@BgGzSmW|n z`zh?8|InLh{45f zB%ba}lM?(Vk3AR2YJBFC4q_1QSDy`L>_B7u=cy*+W2ArCmm?znOyK%oKDp!nP^9sL zFZ;1St=t&(RRr3>U?Ur|PTixj2LFL`PW<*cUk;jYI$zI_^#5)UPs%SY7cqn%e%KJv zz?f>|+8v_IHpyM)l!S)HEth`MP;A5R_nKG(y>-mIO@B*d9^hCX62e~Zqo9_hGIbyw z-YfRj`)9BhyRaPn>~yw`u^g(O$#lHt&SG86%%x*W`FQrJ61EEoIoZSbnlgupA?yGR zs9^EzfPQNQs}j-HVGqk^4SH8Edrf4&(pQyiCJK*O$QneXpDkvcc>2o{maDKsdd^+! zf;BK}BlYoX*m;afAL3_|O(?W(J@a(JNAqrW7_T4gW)%{Pq_KZzH7r9v@ORdogF62* zD;2L90J`s0mfR8j`O7EFZ(?Wln?7d)rREk6U1k^BlIJoT)gq^NyWk61HBkY?NKKsG zrn$hX4&s4OIg&|JSsSYDDfUDU?(QM}gyfE%ViJ<=J;m3>9Ogefv$OtSf>uFuF6?^FYyGbwh6c#}RpTO0;-axj4$8Z=n!r}xhhyP8{N zqv~MYJVD$pMnG%+k*RX@f8>kbhXOylXNp@neWp9a%yyWV5BtQYnM2R61V7+4Z=tvZ z$=eIXt2|jGevFTQT`Y!~Sb^TSRD7aaGrrxM#Zd6d(#_&)oeja+RWE!^{LE}TccEEt zh#y5%-`JCC%6U_q1sZREQ_N$mj($EMKE~?w4F^S&i1g97#k*8iuR}hbY?hOH-w(xX zF(4_E{a@6{Xm|U^;)A9bOe=0|<$2tTt0q@fz_N&dY2;kopyz!i-eU2uy3~PopBBeA zGepc8F@!Vv13!Q|hQ5iUmwy1}R(~u0s*j;WUEy|>`>u&&{qCQ| zP$?Gge?3;zh*C#I zg~z?nSvZ^XU{&EFmv0{Z#7wu;fUcQA3u?lgpdG0t9mxumw;Sa>Rdz>ti5(Cs++L^M zVK1(PmFV`^y{@v#QlF#T>8|vaR`r>}CeOplqL?ozkUH$%d2WxdxYB3$xs6=D4k>EY zm9AAq2JE6B@WLSIBA7xI4sMHme{;Sxb&fC;0kobDt@l0GMAvj+XZ@Wkrpqj#E%htEnChg!^X?a=5D6ur z3-!+>>0EojIut9t64pxmZ0#+TX#q7uPiIP#>9zkdDgIZFb-0k1^ME2`HK4FQ%rAjGSx7%m7eKoZky~}?8Fp>OCg70(t0b4i=AHYoXXN9 z`y8i}&ts2sP9@}D37@ERqNh17-T{As$Jg8uqjOd^+u=nScRBAI?d)Yvv=+O|=eXu_ zSpn#J+XVCdVZb3U{RO0=UOm+8O$AR3Jrb*jRhUnjLE4Lz=9f6$cPur($niesPIGxU zq~(&eie10F${YqdleF6WZzLC1n`a~W{UN0jRje_$MfK%t%#EnV^WyX=e)CI=LHE|1 zbLHU^ou!T{Dyuig1)uem_2!325yB$$?Jt@;vXH!MOGEl#k9n$nMwWe{+vjZQ7Ab@LE=ie++$rUy z{%3o{-W~y8jn@mqZNNzWa7}(oQntZ8eAZM}fQdXVHz+fQOKx}hLRT@TM*x1nVNjA} z2w~1uoYY=4yU-bM<5ZS&DE@p$H@*1p<{z55n_6=E{r@!UZLb-ZI*iMGFjnb-FU=m{ 
z_v)AC>EVz()4w-og|XWDO>!6fz9z@91G~SFZ--T?-y9+j<#emF$TRTzwMFj0btG#i z!%EZ_x06Ff8rnl{A98||z+;edbEG_+XGV2Uy9Y+=ET8L}XUFQ!aj;EJg1h zC4a;jCp22V8Sth?%k`Y_XFJGNZf*SBU2f!`pXn*@MN*m|uS0UxNc44y@@>5Ex4q>z z!UK*AIyP9I&Z;RbQ!Yv!HPXE(vnL#HK38!P9B`hYf)KAt7Gz7(WY%33BmqBy@naT+ zMB0;yK_;&sDo0V$5P399CGQZqhnT9;>X*#0EQZMK($ZAH!lKLFo-#*in#!cfa3r`v zM`I>k%j#B!8OarKl~INs8!E?8)KGaP%P_#dI8=^cL#Sz}+(8`D0z5?xll!wQx_OwK z$+GF*VRCZ+KN$z><|?ZwE#!;+c$M8)fW8x^)33o))-}qO{nLCz*~b#!$(6x92+Y?#w?ro zXNwSR7q+PW@o_QwrkmwqjBUM+ySCedPI=diUNV}7<;pX(y}bAyRoE9OzCT#}N{izAsdTcO z8gWoHfI7!%$pp~qBt0`h9x0tt*~4V-t8}37Jh_{6MrDuCcaKONX=0viXXgR{ zpUsnP>>|CEC--KT=`6lbUcNkrCgsan;!!o=t$a3L9wi=A;a-U*>qNOD8cLifr%ESP zONBqn66+s+TA?>5%HgzTqTE(Ir5bhw!Hx*y+415T)&JU0-6(7ltt=VVM`vlc+! z$;on}q-pFSYV(E`Lp`U+Hc``PS*c>Df+=!W&8nd>rU`BI6;tGC+#)-1n><8}(fs-S zdQf% |yRE|3ewIL*)ANg-{T93jRBKTnert=%=yEt=~fk2jf?PLn&uyBJI7Q!*>y z!pteb)I>4Pl_`mukQjjU+w)9`LeXopH<|YZ`_{B1KVZ08Z zS<~eMkWoky^a(TMXbF;PUXeT!0_W)>IU33SBH4#zkVEbz<`}>uYyqS;wP-$<{^r1} zPNa_y-+2M*2>eH^)CBx;Gd77UK8pyOMUg5nd5|`Jaa-Io#b2y!@yUC(!%R3HqYsq?J6x5 zga(bOpJ$!)o9D<~w75+i>K0AFKksmpN!V&UZP(I;?NsNMdy6|XV;e?qazh=6y9`S7 z-yJ0Lo)%=@&1+h0X>h*o;b8H;*8sLpix&17^@y{NwI%!Pr+o{sxp$B{dVrsU{4Ita z^~mMy1X;aua%7Xn(kEldS=GFb1xlI%#pZeCo9XdNIl^xrZKk)q@)&lM+WF-1;z^Ax zW0g+gDUFiGDqZObpFHainecQ8!cn7FJ&pxObG*f9ju~CIeI*>{W5L_WK%*~L%EQ?y z{@jji3*|A9XN-br;tAngpvpc?xM+OWYZl74n;{`mmdT?mSB$U1ZyNq7`9pcS$U=zj zkjp##$;`ucprgv`qR!P|7qfonGWj$nJh7efs1R$2L96>P{f&Gr5*cDlh!8{WyX7iO zeI&7DUXBe^7bPv1GfeRz*LJluedThwlf=4nV5v6}B#|!MCqKiI>CrlQJ*=0s`{fdr zMjP&zdxjri-Gb9vtf zxfk0^(W~WbB(qk_)tGIct%i!up)0H9$=K`}zXnC}^{O@UL=&5$AM(px87nYg`d@2m z3j5%7*E}q@hy6eg%bjCags{mu1mF}W4-Z6uWghiSiU@4&Q*}VDNrN@k6Qwde0yvwe{4rHACbdD zgI;4n*dAi2Q+Q6~ls0S!?b{)jv0VlObjk=a!0wRiT46V5$qXj!3Gr_#??|7Hl{I?m z8F>-gYt*5c&&q1#z7`es@d}4c!hTxvtUM?eJAo?0ng$}R#OcXm%x;J^JPWLw2a8<~ z*Ibx5hP0d*%m57!8ssTp=|WS0P}mjV#2FQVW*3 z)F@A|SVOsp7HnE5gvW&E zES`KX%W04$e|uSuYui1PCFD7(+?75rbQr9MP;PucH#_#q?b;>swpClSkch%T 
z$h6P^@`r?8XP-2x+>5nS2EDu&woC?H*b5VD2(OnGDrAxK6?sD7qn(bvB1ge?IQxp+ zh5q%b+?MRG%7a64jK;vtx#WCR9wg@Twnnfi^wq1f+fu;4S_C0GRG0}5v^+?G)TliG zuHW&doC0C-`J3{PF3wOEJKEy{2YSJDzIh%e9G;~m*XD%N=>Lse;NqZ00?Pgaa(Z}6 zC_-7Nf7tR4dXIy0wnS~-ll#%8CQO9N7DKP(qcod^D*Ag9#9p<2s!2`~#ag4WAuEE& zTGgTk@>;M~-HkM>14z)Uj+VU#Szb%0-jg5FR`GULghEc-byzMD>qCvLQJuXn&*H1) zq7URBAzE%bB2Pr@13h&_&VYy+YF6S&J}S2zeBio;)gMtX0KcVGH`t9HPliJH-%1mY z%4r?8qn}Ar$~|ywIpM`BfjNpb_I#&r2W>ej4~1|!aa2xkcVj!-Dd|Ib#(*81hN`eV zRM^$3;T<=QpBF{Ly8}G1N5A@^{IE#1pU6e@<1sk_a@GEcJV3(6KsWzMe>yxgAloRz0{Y-3>qg3EU_Z*_NzBmtS?%s2V;ldIH;Hl}H zyfHKeQxAqBW@lU=x9&WaiShh7Tk38RB52>Ya;yv!sLa`&Zn+@GK*BgL$Z2AC3!lbf zqJrYJr4zvN-EnB3|ul^02&SALb%s5E$4n!OBZXjD9A5;82*ceg2$(pLgQzx^uz zgIg)DU6mhYS@ng=W-Xg{PPOF#s7dJ`wwQ5b_K&y)&ms^AT39F2T4#OZ?Ydl3i zE!10#4SoGRi-IjzYz4Q$;5xQZc0+LBIX-5K1=f?w(nq>WOPz-8GtpfN_sW()(avp^ zRCqY@+bRRYwp+Rd*xOO+g?&-qR_W4xhlO3w6j*FYz{$Pc!eWE|ZWt!KdPn-Ptuj^I z!7(3c7IBm#nIW_7@P4U?y(5?=OX5S0HcKu?Ls?c9*AQ+ zmqsY<#r+ntMkvv2KRp_u^h0tuLaB{8$gA0G2aaPGTWB(19Ak$qutSWR%OaKA#iQ4{ z8cwz-rBm-?m~n>fiGU-+GCbDYLu7C_7hnoJv_&}1M;uAzQA#!TKh8xd6X7-C=#D?XGICWI@!t+g?cxg{Wa&6&(TwNoV*#G{UMt)BoL7u`(=J ze(I*Y&#uth-4zGqM{*A(7Rk*$luaVy+de-NZOE4=tvwZ!Xtu7q6iq#QDWz7;%JF3t z+lYeIs&DS4+-VZqSow5}inIo35o5hRBO|SJ@-{h=`&tybtB?5E7u5_yFbA=&DG>T*Y;QsCX(|BLAI-ZQAQViN->Te{8_P`z zl$*j~Pd9W+RGLuGs@BXKYt0PQ;t5172InxiBPcl|oX8hlRssL`-oVPNLK#&I0b0A$ z&LPUHYynjaRiZ2ltSkchZy_WJa`@n$8mjz*RZ{sdB`TrP%IrokW6r<<0<%{kWN!{M zS*vWgC@SgcVM=1#Dl4C}6y(oI}z zj5nb^I2#&T%!YcSAzPX(G~h|Z>!x6Xdq*py#VyxcA0uwP{$i$k$0&FIp_%Q0W_ARd z+0~+%9Y!+^5aYXeGkD!?0E^e(8mp`@u|0a;IHjJ6d##kZ48G2NR=sAK{EaB?H(#4ygo3I;4riDSuM}}c&{6~xl6Y$UH6F$UI zO(`t&bA?JMd+5n4g^I%B^`GY`PH9BaB)-W!+3gQt%}0S9RMW3hV} zE#Ncqy7ZhB-db=b(z!BaSt}X%-~sc7laIh5)c-`kl>RI%T<6I$+Gol7b>sv zU1fKb!o6BZAWTk~gR1wtDrJrW2llPC${?&$*VUp<0ex7jB;t*C2*H8@M)~!+3M~kH z`4hkROugxWaBw0xt9&LvZn`CGc+>5aRAp z?tlQ^aF3FNgb&G783qGzIc$;6m&}^p`(CBaBvysd{Xb(4ym!CyqF5cqH%em2P~p$4 zR4y`z)y=Dv)$n!kA;nz|%7VJE>tm`z>*8fuSY;H6TpOGo^(~65rSq#XeLi_mN!RLm 
z@wH(>Lm)48HEfjz8oye33_=mbQksJUIVYiWptT3GJ0vk|_ zXD`RQIBuIaJseD0s%NJOX2sV>t3)5PeN8(04FU?8}D?YXi{~8Cny^;=wTCus*zA z85IIQ`ijStrR79|cyiG5t@5OU2aa*BpOt_%n{ z2wQ0~d?z_2RP=wz zWAaA9rojezo(HKH$Q$SOdW8h($Y_TPhf|E8GlV-h5vhV;nCz*!_Su1>0{n{u@$o(c z(A#Iz`sbm46X}KLm1kKmnzLK^JL^sHFDR*FOLEE=@-r`EoLEn}iYI#<vkpVw=foN4%folghz(Q zgvW;qSID7bs&9J%g7de)^A9g5z2Q|kc>wtty3&(Q9Dqf)R0j^UK<%Fxj6;mnrnE`d$eooCpo+gplVOf(v%X>Xxt z$yALbDm6T?Pnf;f=W&&LUB&RaCI^Zgd`p?o7I=_^bf~A)aPU-eIQ9v8y$v6E235YT zBu*U?-ctHxgx?^2GEfcDCky2!I=u)$RI&P5E@iW9lvO9pmxSd2E&wqeSmA-f*;Lb{ zbfKt2hP^xF5PF+SMTan7a`>07wCRv?5?Od&02zC+3FDqZ?cY&)rA`TNtzv5>SP$tK zLIFS#CG}NEG4IW9>hbrZJ zdtb)kw(foz6~2oF*8;K}hW4KHo|2kU*$P4xhfv)-{;C!bs__hLRRyXZeNP#P&=r80 z1xY*qFqFp%sz0oBPpoT&_SzfKUK;?kivD$2@nWjp`aXOCYw4Bup#qGz(Q0%c;;W7p ze4unptZxOe;YNrJ0f-H>`vWBt!g%wCuokw^@XwSP5PG#olp*0;!kIKtVA0sCEcbbL ztv{~B_1V&DFt4{B`vC|3rT6KDV%TDia*=e#JQ@T;c zaj4{9KEfQ_O6@*YlHqe%`6*^|Q=sVwK31ls9Bu{ij0`j!ZDxQo@b@*d(9uBWj|Qp^ zIHpV*a4cNStnj$XTwDSc`ViynwN2@lHV`Z<91myq>DUw>Y#%Taz64Y@Nr06?;h!n1 z;1=Lyft~#6XRx?AJu+zLC%`RaHplH5I`;`A_&I9x8EoefpF(|Iq-Dp^^~*e~BYp5G zs$b#HeIV-$QU_p)%toJmhE=ISWPCG<0})+d*v~kwbmaw}{#@x@W41*Ms4zSjg&yZZ zjKJbtSY~llxCVGCiUXr5oa-zf`zKOc$Ye=4EQJ8a!cv4rYBn}DxL*k`C343h6M}Fu z0#0WaLUVAf;w&k|p~iVmZ@igxgR`(Eb1YDWQW3nAH`2xuxC?PamCxzrLKJalu5vC! zZP(6fa@^vRZH~uX7H?yj&25&Hz=+_LEOQ;+8_Rb?ZXxJY>}>h1CfK^GyrR42j>T{+t&UdIvdK1V7PZsm=D1kKj06>7M%%WAtpEm+|%hM?n#*v2Nb`M={5 z9?cRYd^VIdq9G6`

ZaM`5v9fk%O*coAraQ?i9M^PN@AybxpsA(A&5V-sTjQ=_)o z7+!=yB0kB|SuE$7%5&iFH2H}$d*oO8^1H@+GlM#TPW~0u>l@4*4 z*}i$rR#Z;;9Jdr3v_Rl)K#|XtwC=G^-#pH-Ktw<$F&knW9W61I5r9@RyahBUdpEXz zRU6l}tr0B5=7Pc?9goCh(UZ7UGMD3KNUK3)@j)~zlB^bE2q=;)o29kXNua*Rl@Nj~ zjw0;~WoT-4D{zKD&S}O;OM#pdz)4PE6av_jfLRcLka$ulnKrXk9mkDz9DzDKk<{pD!P%;U>&6PMKm`|la}xfm zG)njqMrs*N{t`Z}TH5d>RwA|Z&6i5|J_}l*VgL|Z*^CKz(wdp0G61iV2K`HUxu&XB zos^b!Qf{bI-KuacC*8FcY612SP4Fy9U13{g6IS3+tg{JqHgyz&qsq!WT*j5TsPZcq zL#ybquV5i1(uZFuS$)>FYURF`jof!b8}*cZO37*00FgEc{07aD2fcw=(?DBJDem-5 zt$=OeUEbb|lr1f~ydBSAPM#%hwP81O?APFj?exOe%B?j!03;yqxva1fv0cRggvdA=VQTS2u#=(W8tC7Xnb$0W_ZECfx-^gP80?U?&Iy?dB2TECPvZA z-@to4g^qp$jgPZAr@md4(LvrO(rmrfgt#I|Q(5bU6= zGfIkhQNyhck#ye~c*o2kxYZ$whJ3FiQp!lyL2T0^!?^1qp7Q<;_gP|y-+_s=6!nABTg(pgr`Y1C=m({Xm>cF_V2h>AKPa7mxmSNs(xpm%IBVB(?16<}0zIoJ z`4Z4mO|ve++^eNW_}gJ>x`a)zqw6m#+0wBv_O!7erJ~F5tX|Y>FDqb*MEd9N%IL1i z>J|2n&EL!sLIwNH+YhsLSA&0D90y|r7W$%_DCw$V4{3h?@eED9swA86PWN1eV>W}{ zxvK2MmTiq%$g+2Tp)TS<3%82uIq*ZaNi6_-K4wyHVKen_P3lu7q@<`D`C**?8on-| zRhqgLUGE>FUPh2bVyLxQN3pni z^cSb7HeR%r-kz$iQVO(aNst`$-c&V)7T>B)!ZP#Ft?EE?rB}-yGEwV3~=PI5T!*FJ#0=76`WZLJ! 
z!R_V2vBv2qekg{!`C#7iQ&m3uTsO`$BJ&#bYYpmirmLADV6(qC)h=M9H=ODvu)<4o z)W6{TOW|A<$APxe{qw*Q9}5sfpUzY7L>S97mpYa0)pxnTG3Nb1hlQ4TwLVlZU!7z+ ztbNR}HBdh_U#(`LXYkop!1z%l`g|X_gaq17vNXp#C84=@e8Lv`{f&?@y6>Q}9LAaNUx;jQ#k0U^PsSQ>VR8dYS=V)jwl zc@wioW=+l>HHdq*n(xKnXJ73_UVgN$5NDelrA2`gtNe)TBE(PdoehuEIBmrb6XiOp z?EO;urS)+!`$Q+)z1+o@NsrpqPW>v}rSlynZeOyuWPaa@%F@!lX&D1khuY`xV|cE) zf#W*7e4x3aJ_3EUO3f%45&|mzPxLVD|GkHI*z@S(Dm9KqJgDAYlJ!5;)Bg@GcRi@q zA+m4qYBeK2bDYELo5YEVP#7F{gP{Tebi_EbXB;05T+d>VhRhgnDEfbbSOfmwiG@3Z9Q6oUeQ=F>61GLfT6KW9Ii8+a zt3GLt!-Seb6W6J|q3Z5jr;e3pf|$|?di6=IpZ@zgwX=lHpH8GMhIi@AHtn)l8A2zv zYxh%hgBk@D-nT*3t=-uG`=sJ|&XUSfXEpuM0Ks-jkJ_M~QCQvj$1xGN(l?K*b6{PK z|BE`zoQg(w=`?0-VN2eWwGlj{4hK{{_!`@wxSahuv7 z$vxZDaoCagV4LbgZ^mv{C!_D1wySAyd3~^5oewC({sw9vrh|WjP(4boJf(h$!_XU^ zRuO%8iZ<;~55enn`!niE>9-KXz)pBpoh&(+IaGi0SqKq4T0`}uU0iuN^!K0RqIjnM zTcg^`4AGUa2a2=|XGOobsz%WZ4~gydC3~RO<;YNAES^%Uklp4bbvR^J-cdC|pYxJB zok7a|<7M@J$f?}D&}3P(X)n}nPG~NDuvZ-qa&&n`%|$qR#Ve4382^D+RW~$bE)9B3 z9c*>7Zn(FL$Km9ZvU)3epr>3_+ zfCbd!EugE4yl=s*sibe;f<&*T+uv3PYqh+`xy+tJeGjSe^zPegrl}fk+OR|FG?Z~2 zQg=XGw{L>bhxu9kj@lVU&8BzM6G-lS7hF`Q7rm$6YBH|?B^&7F57Zt|VgLF77RpxY zdPL2zHHGX(8v=_nvsEawhf0sAoj}5sN7UCrE#POAww6T?N2u}QDtho^wUhqRQT0v< zM&_VT)E=gjp{J<$6SWW9q_6u#EfHb<{`eUve2$8btAnAS9y<=LkVPLIS4Xj}dhF-w z015gk|4TInHq-nsF)P{j7rs*cVm!0-5TcDRL@x`b5fR3_z$DRTw$RV#)E-iSX0_1g zU#q#M7)u=W`9{5Binru(jTE5|J*_5)`qLNH`6j5J_fcH@zpU0>l(Nihm%ihodbs@kS?5aNNZf0r0GCQuqUWm)LBng)BE-&Lhwxs+@ zXQgwp+sA|CaM*Gf)7D0V^$y2qV@yuVX5L@|jqIRhLgg>(piQ&xWeF`{e$zqwhl%ae z|JF(CgZq-`m|ZIYyA6%k?BKJ)c>H!VS{qVg_UTV&OC(bgi&teOv-9!%8IXna>K z5@vI5FRe4(-BpVQ_pP8uyK22mb(U4MtC!Y~^W*w%S`FK;FYBgtHO1Fk`Utl1b9`QA z{AMOx4+k{p*wCWoHGxL&p#_Or6lT*Vx<64XMHjYS??S&`S_d#By3ikO?c=TYP;Daq z4qBS7b%DO_rKRKhQHt-a9l^tK`l+{83VXwur2SoDC&`|wrHW@PAymH>H^F-Ut2@P8hp z-N6#|MT0f$$0zG2b2KK`#10=h&sAD7!no99B>!bRmJ@%l>RZ`AH~>-VD2Ad7SU-8V z8L-@E@X2)uxP=VYZUnrXxGBt8W}okLR=`(=;IR@{v2m%Gae0|{3N!R*JHy<@RRDk$ zxGAKn5B)wtv&Y(R{m+IJ#pP-7p@-~&+v{Lk)1%*Dg@0R~Hm+yUf9e=Q8_c%A(4;KJ 
z8|lnzw2erDjaFP*Y21=f9N0#8QbV5B3A9S1zvpSa5=yQQv1*jlTkLTeHz<0irovxn zHdnLDkY^TLQ>Wx>J!VvX_sN@@GTA*fwFRUce!>74K*)&@F_Slr`yUW~ z!-*5T4#MbOkP3De&hQ&eG?O#9mtilOno`6KQr=bEOo#W>B75+oF{~O6+9<_gTwjTC zkrF^ba~lRL*r&G~u)%^wGmH8d9c&4k_XsxBb$Do{RgVMEDx5VQez(MRhC&1Xi6@F? z_HhPKVUPjQ&-^S9Y&nDwmsS}ZT8#K4@Fu_Yz*&w*tl_wsUydc5v0D%Oo?{}gn}j?0 zirp$!eWmWXu3|kYA9sHu^n1ocjl?V~ReQDJQUh~XX`fd+iKTwW$y&ICHEZwNwB?c2 z)|&))#LFC>`B=xuY>Fa43ch}umTO*Nt+NIeaWt+#+sL--rwcUPK(L$coS_YZ0^Ku1 z>&xTm{x>Tvy1NkaWgq=qs67Zfsjf&{VrsG;CfT8#LiSmQb{9LUyJu^A5sRTGmuLsg z5MV#Lv?rlgAG=*!ig+|Vc0S}Hg3$EzQf;;g{(y~cZIlV3=7a~`>P~lfwJcKuRM~4@ z?aRObmomN6LhT_jI84|&=~dcIFbpcHv^z1uE>vlQ;cMx=L@NiXHZ0NhL)#QB)y9Nn zg>fTctIXykENvd+5+gO=hf8|KI^_0dBm}N3YS~Y^m1I ztkqy3)=~s%v!TWo5jwDnP7pYwfou2sy?ql8xPy}u?Ee7 zLTrP!6uqq8piKv*k8RNAgIw7gHQbCrFK&dO-%r19)V>BWPCcUGdXvL+X_M9ymRjti z+BA@K(W7V*Jvs51)~K*!^w5)9ckwtv2iK`%PyVgcQ&@4Q>0yNc6JGJb4+xaV#o0SV&G@Rc@BsxPiIM?DVgYej|RMMI~+BeLqFaEps z4`!+jU!f0qSsO2!B4IPXdq^9t*LvUDpHk2Jr>YmEn{yPBrp7c48qsvCFKF*st9qjl{yyU0R5JZEgX0 zrC8(DRbtBtm6EyB0#{a)2ei{{w;>^eMGaXPxGR`W%oby4=P9#lf{N4MJgDtv;PmRZ zVWzF6-`>`EyzeT;se?s;E(fI)a%{XpA^X|eN&wmp1L z0O#p8ucOqLzL(}5)mDNd@Bc)r#tkRfNKHQ+uG>DBdwG9Sh$pmyK_I%ZfchV z3C-0TkK5wG>j?Cs9v|a4_DKV@?Q%=dwOAkk_RX~Sm}XTa5Y}u<1qD9QmWV@awDJ?} zZWz@wR;S% zsr{FByBJw!%f!G7jWasfdUCtxfTLdcT6;@03N@Y9=5t4LGh-bR#RGLT{=BvohWMrP z+CW&~y)QuWG||!vT8wz0n&^V|M%W5lgaK@oZLKZh5?#Hhb5P{n8Z;ahZGd9lvOW Pd~smCicXmJ+l2oGmJxf} diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 901c15e9756b..a0552189f443 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -17,24 +17,25 @@ //! The client connects to the source substrate chain //! and is used by the rpc server to query and send transactions to the substrate chain. 
use crate::{ + rlp, runtime::GAS_PRICE, subxt_client::{ - revive::{calls::types::EthTransact, events::ContractEmitted}, - runtime_types::pallet_revive::storage::ContractInfo, + revive::calls::types::EthTransact, runtime_types::pallet_revive::storage::ContractInfo, }, - LOG_TARGET, + TransactionLegacySigned, LOG_TARGET, }; +use codec::Encode; use futures::{stream, StreamExt}; -use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; +use jsonrpsee::types::{ErrorCode, ErrorObjectOwned}; use pallet_revive::{ create1, evm::{ - Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, - ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, + Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, ReceiptInfo, + SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, }, - EthTransactError, EthTransactInfo, + EthContractResult, }; -use sp_core::keccak_256; +use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_weights::Weight; use std::{ collections::{HashMap, VecDeque}, @@ -45,7 +46,7 @@ use subxt::{ backend::{ legacy::{rpc_methods::SystemHealth, LegacyRpcMethods}, rpc::{ - reconnecting_rpc_client::{ExponentialBackoff, RpcClient as ReconnectingRpcClient}, + reconnecting_rpc_client::{Client as ReconnectingRpcClient, ExponentialBackoff}, RpcClient, }, }, @@ -98,119 +99,63 @@ struct BlockCache { tx_hashes_by_block_and_index: HashMap>, } -/// Unwrap the original `jsonrpsee::core::client::Error::Call` error. 
-fn unwrap_call_err(err: &subxt::error::RpcError) -> Option { - use subxt::backend::rpc::reconnecting_rpc_client; +fn unwrap_subxt_err(err: &subxt::Error) -> String { match err { - subxt::error::RpcError::ClientError(err) => { - match err.downcast_ref::() { - Some(reconnecting_rpc_client::Error::RpcError( - jsonrpsee::core::client::Error::Call(err), - )) => Some(err.clone().into_owned()), - _ => None, - } - }, - _ => None, + subxt::Error::Rpc(err) => unwrap_rpc_err(err), + _ => err.to_string(), } } -/// Extract the revert message from a revert("msg") solidity statement. -fn extract_revert_message(exec_data: &[u8]) -> Option { - let error_selector = exec_data.get(0..4)?; - - match error_selector { - // assert(false) - [0x4E, 0x48, 0x7B, 0x71] => { - let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?; - - // See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require - let msg = match panic_code { - 0x00 => "generic panic", - 0x01 => "assert(false)", - 0x11 => "arithmetic underflow or overflow", - 0x12 => "division or modulo by zero", - 0x21 => "enum overflow", - 0x22 => "invalid encoded storage byte array accessed", - 0x31 => "out-of-bounds array access; popping on an empty array", - 0x32 => "out-of-bounds access of an array or bytesN", - 0x41 => "out of memory", - 0x51 => "uninitialized function", - code => return Some(format!("execution reverted: unknown panic code: {code:#x}")), - }; - - Some(format!("execution reverted: {msg}")) - }, - // revert(string) - [0x08, 0xC3, 0x79, 0xA0] => { - let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; - if let Some(ethabi::Token::String(msg)) = decoded.first() { - return Some(format!("execution reverted: {msg}")) - } - Some("execution reverted".to_string()) - }, - _ => { - log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}"); - Some("execution reverted".to_string()) +fn 
unwrap_rpc_err(err: &subxt::error::RpcError) -> String { + match err { + subxt::error::RpcError::ClientError(err) => match err + // TODO use the re-export from subxt once available + .downcast_ref::() + { + Some(jsonrpsee::core::ClientError::Call(call_err)) => call_err.message().to_string(), + Some(other_err) => other_err.to_string(), + None => err.to_string(), }, + _ => err.to_string(), } } /// The error type for the client. #[derive(Error, Debug)] pub enum ClientError { - /// A [`jsonrpsee::core::ClientError`] wrapper error. - #[error(transparent)] - Jsonrpsee(#[from] jsonrpsee::core::ClientError), /// A [`subxt::Error`] wrapper error. - #[error(transparent)] + #[error("{}",unwrap_subxt_err(.0))] SubxtError(#[from] subxt::Error), /// A [`RpcError`] wrapper error. - #[error(transparent)] + #[error("{}",unwrap_rpc_err(.0))] RpcError(#[from] RpcError), /// A [`codec::Error`] wrapper error. #[error(transparent)] CodecError(#[from] codec::Error), - /// Contract reverted - #[error("contract reverted")] - Reverted(EthTransactError), + /// The dry run failed. + #[error("Dry run failed")] + DryRunFailed, /// A decimal conversion failed. - #[error("conversion failed")] + #[error("Conversion failed")] ConversionFailed, /// The block hash was not found. - #[error("hash not found")] + #[error("Hash not found")] BlockNotFound, /// The transaction fee could not be found - #[error("transactionFeePaid event not found")] + #[error("TransactionFeePaid event not found")] TxFeeNotFound, /// The cache is empty. - #[error("cache is empty")] + #[error("Cache is empty")] CacheEmpty, } -const REVERT_CODE: i32 = 3; +const GENERIC_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-32000); + +// Convert a `ClientError` to an RPC `ErrorObjectOwned`. 
impl From for ErrorObjectOwned { - fn from(err: ClientError) -> Self { - match err { - ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => { - if let Some(err) = unwrap_call_err(&err) { - return err; - } - ErrorObjectOwned::owned::>( - CALL_EXECUTION_FAILED_CODE, - err.to_string(), - None, - ) - }, - ClientError::Reverted(EthTransactError::Data(data)) => { - let msg = extract_revert_message(&data).unwrap_or_default(); - let data = format!("0x{}", hex::encode(data)); - ErrorObjectOwned::owned::(REVERT_CODE, msg, Some(data)) - }, - ClientError::Reverted(EthTransactError::Message(msg)) => - ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), - _ => - ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, err.to_string(), None), - } + fn from(value: ClientError) -> Self { + log::debug!(target: LOG_TARGET, "ClientError: {value:?}"); + ErrorObjectOwned::owned::<()>(GENERIC_ERROR_CODE.code(), value.to_string(), None) } } @@ -263,6 +208,7 @@ struct ClientInner { cache: Shared>, chain_id: u64, max_block_weight: Weight, + native_to_evm_ratio: U256, } impl ClientInner { @@ -278,10 +224,20 @@ impl ClientInner { let rpc = LegacyRpcMethods::::new(RpcClient::new(rpc_client.clone())); - let (chain_id, max_block_weight) = - tokio::try_join!(chain_id(&api), max_block_weight(&api))?; + let (native_to_evm_ratio, chain_id, max_block_weight) = + tokio::try_join!(native_to_evm_ratio(&api), chain_id(&api), max_block_weight(&api))?; + + Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight, native_to_evm_ratio }) + } + + /// Convert a native balance to an EVM balance. + fn native_to_evm_decimals(&self, value: U256) -> U256 { + value.saturating_mul(self.native_to_evm_ratio) + } - Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight }) + /// Convert an evm balance to a native balance. 
+ fn evm_to_native_decimals(&self, value: U256) -> U256 { + value / self.native_to_evm_ratio } /// Get the receipt infos from the extrinsics in a block. @@ -294,74 +250,53 @@ impl ClientInner { // Filter extrinsics from pallet_revive let extrinsics = extrinsics.iter().flat_map(|ext| { + let ext = ext.ok()?; + let call = ext.as_extrinsic::().ok()??; - let transaction_hash = H256(keccak_256(&call.payload)); - let signed_tx = TransactionSigned::decode(&call.payload).ok()?; - let from = signed_tx.recover_eth_address().ok()?; - let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); - let contract_address = if tx_info.to.is_none() { - Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) + let tx = rlp::decode::(&call.payload).ok()?; + let from = tx.recover_eth_address().ok()?; + let contract_address = if tx.transaction_legacy_unsigned.to.is_none() { + Some(create1(&from, tx.transaction_legacy_unsigned.nonce.try_into().ok()?)) } else { None }; - Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) + Some((from, tx, contract_address, ext)) }); // Map each extrinsic to a receipt stream::iter(extrinsics) - .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { + .map(|(from, tx, contract_address, ext)| async move { let events = ext.events().await?; let tx_fees = events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; - let gas_price = tx_info.gas_price.unwrap_or_default(); + let gas_price = tx.transaction_legacy_unsigned.gas_price; let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) .checked_div(gas_price.as_u128()) .unwrap_or_default(); let success = events.has::()?; let transaction_index = ext.index(); + let transaction_hash = BlakeTwo256::hash(&Vec::from(ext.bytes()).encode()); let block_hash = block.hash(); let block_number = block.number().into(); - // get logs from ContractEmitted event - let logs = events.iter() - .filter_map(|event_details| { - let 
event_details = event_details.ok()?; - let event = event_details.as_event::().ok()??; - - Some(Log { - address: event.contract, - topics: event.topics, - data: Some(event.data.into()), - block_number: Some(block_number), - transaction_hash, - transaction_index: Some(transaction_index.into()), - block_hash: Some(block_hash), - log_index: Some(event_details.index().into()), - ..Default::default() - }) - }).collect(); - - - log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); - let receipt = ReceiptInfo::new( + let receipt = ReceiptInfo { block_hash, block_number, contract_address, from, - logs, - tx_info.to, - gas_price, - gas_used.into(), - success, + to: tx.transaction_legacy_unsigned.to, + effective_gas_price: gas_price, + gas_used: gas_used.into(), + status: Some(if success { U256::one() } else { U256::zero() }), transaction_hash, - transaction_index.into(), - tx_info.r#type.unwrap_or_default() - ); + transaction_index: transaction_index.into(), + ..Default::default() + }; - Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) + Ok::<_, ClientError>((receipt.transaction_hash, (tx.into(), receipt))) }) .buffer_unordered(10) .collect::>>() @@ -385,6 +320,13 @@ async fn max_block_weight(api: &OnlineClient) -> Result) -> Result { + let query = subxt_client::constants().revive().native_to_eth_ratio(); + let ratio = api.constants().at(&query)?; + Ok(U256::from(ratio)) +} + /// Extract the block timestamp. 
async fn extract_block_timestamp(block: &SubstrateBlock) -> Option { let extrinsics = block.extrinsics().await.ok()?; @@ -409,6 +351,7 @@ impl Client { let (tx, mut updates) = tokio::sync::watch::channel(()); spawn_handle.spawn("subscribe-blocks", None, Self::subscribe_blocks(inner.clone(), tx)); + spawn_handle.spawn("subscribe-reconnect", None, Self::subscribe_reconnect(inner.clone())); updates.changed().await.expect("tx is not dropped"); Ok(Self { inner, updates }) @@ -466,6 +409,18 @@ impl Client { } } + /// Subscribe and log reconnection events. + async fn subscribe_reconnect(inner: Arc) { + let rpc = inner.as_ref().rpc_client.clone(); + loop { + let reconnected = rpc.reconnect_initiated().await; + log::info!(target: LOG_TARGET, "RPC client connection lost"); + let now = std::time::Instant::now(); + reconnected.await; + log::info!(target: LOG_TARGET, "RPC client reconnection took `{}s`", now.elapsed().as_secs()); + } + } + /// Subscribe to new blocks and update the cache. async fn subscribe_blocks(inner: Arc, tx: Sender<()>) { log::info!(target: LOG_TARGET, "Subscribing to new blocks"); @@ -473,7 +428,7 @@ impl Client { Ok(s) => s, Err(err) => { log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}"); - return; + return }, }; @@ -490,7 +445,7 @@ impl Client { } log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}"); - return; + return }, }; @@ -506,6 +461,7 @@ impl Client { .unwrap_or_default(); if !receipts.is_empty() { + log::debug!(target: LOG_TARGET, "Adding {} receipts", receipts.len()); let values = receipts .iter() .map(|(hash, (_, receipt))| (receipt.transaction_index, *hash)) @@ -617,9 +573,8 @@ impl Client { let runtime_api = self.runtime_api(at).await?; let payload = subxt_client::apis().revive_api().balance(address); - let balance = runtime_api.call(payload).await?; - - Ok(*balance) + let balance = runtime_api.call(payload).await?.into(); + Ok(self.inner.native_to_evm_decimals(balance)) } /// Get the contract storage for 
the given contract address and key. @@ -662,23 +617,44 @@ impl Client { Ok(result) } - /// Dry run a transaction and returns the [`EthTransactInfo`] for the transaction. + /// Dry run a transaction and returns the [`EthContractResult`] for the transaction. pub async fn dry_run( &self, - tx: GenericTransaction, + tx: &GenericTransaction, block: BlockNumberOrTagOrHash, - ) -> Result, ClientError> { + ) -> Result, ClientError> { let runtime_api = self.runtime_api(&block).await?; - let payload = subxt_client::apis().revive_api().eth_transact(tx.into()); - let result = runtime_api.call(payload).await?; - match result { - Err(err) => { - log::debug!(target: LOG_TARGET, "Dry run failed {err:?}"); - Err(ClientError::Reverted(err.0)) - }, - Ok(result) => Ok(result.0), - } + let value = self + .inner + .evm_to_native_decimals(tx.value.unwrap_or_default()) + .try_into() + .map_err(|_| ClientError::ConversionFailed)?; + + // TODO: remove once subxt is updated + let from = tx.from.map(|v| v.0.into()); + let to = tx.to.map(|v| v.0.into()); + + let payload = subxt_client::apis().revive_api().eth_transact( + from.unwrap_or_default(), + to, + value, + tx.input.clone().unwrap_or_default().0, + None, + None, + ); + let res = runtime_api.call(payload).await?.0; + Ok(res) + } + + /// Dry run a transaction and returns the gas estimate for the transaction. + pub async fn estimate_gas( + &self, + tx: &GenericTransaction, + block: BlockNumberOrTagOrHash, + ) -> Result { + let dry_run = self.dry_run(tx, block).await?; + Ok(U256::from(dry_run.fee / GAS_PRICE as u128) + GAS_PRICE) } /// Get the nonce of the given address. 
diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs index 3b9a33296ef4..cdf5ce9d1b98 100644 --- a/substrate/frame/revive/rpc/src/example.rs +++ b/substrate/frame/revive/rpc/src/example.rs @@ -20,7 +20,8 @@ use crate::{EthRpcClient, ReceiptInfo}; use anyhow::Context; use pallet_revive::evm::{ - Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, U256, + rlp::*, Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, + U256, }; /// Wait for a transaction receipt. @@ -39,152 +40,57 @@ pub async fn wait_for_receipt( anyhow::bail!("Failed to get receipt") } -/// Wait for a successful transaction receipt. -pub async fn wait_for_successful_receipt( +/// Send a transaction. +pub async fn send_transaction( + signer: &Account, client: &(impl EthRpcClient + Send + Sync), - hash: H256, -) -> anyhow::Result { - let receipt = wait_for_receipt(client, hash).await?; - if receipt.is_success() { - Ok(receipt) - } else { - anyhow::bail!("Transaction failed") - } -} - -/// Transaction builder. -pub struct TransactionBuilder { - signer: Account, value: U256, input: Bytes, to: Option, - mutate: Box, -} - -impl Default for TransactionBuilder { - fn default() -> Self { - Self { - signer: Account::default(), - value: U256::zero(), - input: Bytes::default(), - to: None, - mutate: Box::new(|_| {}), - } - } -} - -impl TransactionBuilder { - /// Set the signer. - pub fn signer(mut self, signer: Account) -> Self { - self.signer = signer; - self - } - - /// Set the value. - pub fn value(mut self, value: U256) -> Self { - self.value = value; - self - } - - /// Set the input. - pub fn input(mut self, input: Vec) -> Self { - self.input = Bytes(input); - self - } - - /// Set the destination. - pub fn to(mut self, to: H160) -> Self { - self.to = Some(to); - self - } - - /// Set a mutation function, that mutates the transaction before sending. 
- pub fn mutate(mut self, mutate: impl FnOnce(&mut TransactionLegacyUnsigned) + 'static) -> Self { - self.mutate = Box::new(mutate); - self - } - - /// Call eth_call to get the result of a view function - pub async fn eth_call( - self, - client: &(impl EthRpcClient + Send + Sync), - ) -> anyhow::Result> { - let TransactionBuilder { signer, value, input, to, .. } = self; - - let from = signer.address(); - let result = client - .call( - GenericTransaction { - from: Some(from), - input: Some(input.clone()), - value: Some(value), - to, - ..Default::default() - }, - None, - ) - .await - .with_context(|| "eth_call failed")?; - Ok(result.0) - } - - /// Send the transaction. - pub async fn send(self, client: &(impl EthRpcClient + Send + Sync)) -> anyhow::Result { - let TransactionBuilder { signer, value, input, to, mutate } = self; - - let from = signer.address(); - let chain_id = Some(client.chain_id().await?); - let gas_price = client.gas_price().await?; - let nonce = client - .get_transaction_count(from, BlockTag::Latest.into()) - .await - .with_context(|| "Failed to fetch account nonce")?; - - let gas = client - .estimate_gas( - GenericTransaction { - from: Some(from), - input: Some(input.clone()), - value: Some(value), - gas_price: Some(gas_price), - to, - ..Default::default() - }, - None, - ) - .await - .with_context(|| "Failed to fetch gas estimate")?; - - let mut unsigned_tx = TransactionLegacyUnsigned { - gas, - nonce, - to, - value, - input, - gas_price, - chain_id, - ..Default::default() - }; - - mutate(&mut unsigned_tx); - - let tx = signer.sign_transaction(unsigned_tx.into()); - let bytes = tx.signed_payload(); - - let hash = client - .send_raw_transaction(bytes.into()) - .await - .with_context(|| "transaction failed")?; - - Ok(hash) - } - - /// Send the transaction and wait for the receipt. 
- pub async fn send_and_wait_for_receipt( - self, - client: &(impl EthRpcClient + Send + Sync), - ) -> anyhow::Result { - let hash = self.send(client).await?; - wait_for_successful_receipt(client, hash).await - } +) -> anyhow::Result { + let from = signer.address(); + + let chain_id = Some(client.chain_id().await?); + + let gas_price = client.gas_price().await?; + let nonce = client + .get_transaction_count(from, BlockTag::Latest.into()) + .await + .with_context(|| "Failed to fetch account nonce")?; + + let gas = client + .estimate_gas( + GenericTransaction { + from: Some(from), + input: Some(input.clone()), + value: Some(value), + gas_price: Some(gas_price), + to, + ..Default::default() + }, + None, + ) + .await + .with_context(|| "Failed to fetch gas estimate")?; + + let unsigned_tx = TransactionLegacyUnsigned { + gas, + nonce, + to, + value, + input, + gas_price, + chain_id, + ..Default::default() + }; + + let tx = signer.sign_transaction(unsigned_tx.clone()); + let bytes = tx.rlp_bytes().to_vec(); + + let hash = client + .send_raw_transaction(bytes.clone().into()) + .await + .with_context(|| "transaction failed")?; + + Ok(hash) } diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index ccd8bb043e90..88a3cb641784 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -23,8 +23,8 @@ use jsonrpsee::{ core::{async_trait, RpcResult}, types::{ErrorCode, ErrorObjectOwned}, }; -use pallet_revive::evm::*; -use sp_core::{keccak_256, H160, H256, U256}; +use pallet_revive::{evm::*, EthContractResult}; +use sp_core::{H160, H256, U256}; use thiserror::Error; pub mod cli; @@ -91,13 +91,13 @@ pub enum EthRpcError { TransactionTypeNotSupported(Byte), } -// TODO use https://eips.ethereum.org/EIPS/eip-1474#error-codes impl From for ErrorObjectOwned { fn from(value: EthRpcError) -> Self { - match value { - EthRpcError::ClientError(err) => Self::from(err), - _ => 
Self::owned::(ErrorCode::InvalidRequest.code(), value.to_string(), None), - } + let code = match value { + EthRpcError::ClientError(_) => ErrorCode::InternalError, + _ => ErrorCode::InvalidRequest, + }; + Self::owned::(code.code(), value.to_string(), None) } } @@ -121,35 +121,20 @@ impl EthRpcServer for EthRpcServerImpl { transaction_hash: H256, ) -> RpcResult> { let receipt = self.client.receipt(&transaction_hash).await; - log::debug!(target: LOG_TARGET, "transaction_receipt for {transaction_hash:?}: {}", receipt.is_some()); Ok(receipt) } async fn estimate_gas( &self, transaction: GenericTransaction, - block: Option, + _block: Option, ) -> RpcResult { - let dry_run = self.client.dry_run(transaction, block.unwrap_or_default().into()).await?; - Ok(dry_run.eth_gas) - } - - async fn call( - &self, - transaction: GenericTransaction, - block: Option, - ) -> RpcResult { - let dry_run = self - .client - .dry_run(transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) - .await?; - Ok(dry_run.data.into()) + let result = self.client.estimate_gas(&transaction, BlockTag::Latest.into()).await?; + Ok(result) } async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { - let hash = H256(keccak_256(&transaction.0)); - - let tx = TransactionSigned::decode(&transaction.0).map_err(|err| { + let tx = rlp::decode::(&transaction.0).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); EthRpcError::from(err) })?; @@ -159,28 +144,37 @@ impl EthRpcServer for EthRpcServerImpl { EthRpcError::InvalidSignature })?; - let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); - // Dry run the transaction to get the weight limit and storage deposit limit - let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?; + let TransactionLegacyUnsigned { to, input, value, .. 
} = tx.transaction_legacy_unsigned; + let dry_run = self + .client + .dry_run( + &GenericTransaction { + from: Some(eth_addr), + input: Some(input.clone()), + to, + value: Some(value), + ..Default::default() + }, + BlockTag::Latest.into(), + ) + .await?; + let EthContractResult { gas_required, storage_deposit, .. } = dry_run; let call = subxt_client::tx().revive().eth_transact( transaction.0, - dry_run.gas_required.into(), - dry_run.storage_deposit, + gas_required.into(), + storage_deposit, ); - self.client.submit(call).await.map_err(|err| { - log::debug!(target: LOG_TARGET, "submit call failed: {err:?}"); - err - })?; - log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}"); + let hash = self.client.submit(call).await?; Ok(hash) } - async fn send_transaction(&self, mut transaction: GenericTransaction) -> RpcResult { + async fn send_transaction(&self, transaction: GenericTransaction) -> RpcResult { log::debug!(target: LOG_TARGET, "{transaction:#?}"); + let GenericTransaction { from, gas, gas_price, input, to, value, r#type, .. 
} = transaction; - let Some(from) = transaction.from else { + let Some(from) = from else { log::debug!(target: LOG_TARGET, "Transaction must have a sender"); return Err(EthRpcError::InvalidTransaction.into()); }; @@ -191,26 +185,27 @@ impl EthRpcServer for EthRpcServerImpl { .find(|account| account.address() == from) .ok_or(EthRpcError::AccountNotFound(from))?; - if transaction.gas.is_none() { - transaction.gas = Some(self.estimate_gas(transaction.clone(), None).await?); - } + let gas_price = gas_price.unwrap_or_else(|| U256::from(GAS_PRICE)); + let chain_id = Some(self.client.chain_id().into()); + let input = input.unwrap_or_default(); + let value = value.unwrap_or_default(); + let r#type = r#type.unwrap_or_default(); - if transaction.gas_price.is_none() { - transaction.gas_price = Some(self.gas_price().await?); - } + let Some(gas) = gas else { + log::debug!(target: LOG_TARGET, "Transaction must have a gas limit"); + return Err(EthRpcError::InvalidTransaction.into()); + }; - if transaction.nonce.is_none() { - transaction.nonce = - Some(self.get_transaction_count(from, BlockTag::Latest.into()).await?); - } + let r#type = Type0::try_from_byte(r#type.clone()) + .map_err(|_| EthRpcError::TransactionTypeNotSupported(r#type))?; - if transaction.chain_id.is_none() { - transaction.chain_id = Some(self.chain_id().await?); - } + let nonce = self.get_transaction_count(from, BlockTag::Latest.into()).await?; - let tx = transaction.try_into_unsigned().map_err(|_| EthRpcError::InvalidTransaction)?; - let payload = account.sign_transaction(tx).signed_payload(); - self.send_raw_transaction(Bytes(payload)).await + let tx = + TransactionLegacyUnsigned { chain_id, gas, gas_price, input, nonce, to, value, r#type }; + let tx = account.sign_transaction(tx); + let rlp_bytes = rlp::encode(&tx).to_vec(); + self.send_raw_transaction(Bytes(rlp_bytes)).await } async fn get_block_by_hash( @@ -248,6 +243,23 @@ impl EthRpcServer for EthRpcServerImpl { Ok(self.accounts.iter().map(|account| 
account.address()).collect()) } + async fn call( + &self, + transaction: GenericTransaction, + block: Option, + ) -> RpcResult { + let dry_run = self + .client + .dry_run(&transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) + .await?; + let output = dry_run.result.map_err(|err| { + log::debug!(target: LOG_TARGET, "Dry run failed: {err:?}"); + ClientError::DryRunFailed + })?; + + Ok(output.into()) + } + async fn get_block_by_number( &self, block: BlockNumberOrTag, diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs index ad34dbfdfb49..339080368969 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs @@ -14,7 +14,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - //! Generated JSON-RPC methods. #![allow(missing_docs)] diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs index 1e1c395028a4..cb2737beae70 100644 --- a/substrate/frame/revive/rpc/src/subxt_client.rs +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -21,26 +21,9 @@ use subxt::config::{signed_extensions, Config, PolkadotConfig}; #[subxt::subxt( runtime_metadata_path = "revive_chain.metadata", - // TODO remove once subxt use the same U256 type substitute_type( - path = "primitive_types::U256", - with = "::subxt::utils::Static<::sp_core::U256>" - ), - substitute_type( - path = "pallet_revive::evm::api::rpc_types_gen::GenericTransaction", - with = "::subxt::utils::Static<::pallet_revive::evm::GenericTransaction>" - ), - substitute_type( - path = "pallet_revive::primitives::EthTransactInfo", - with = "::subxt::utils::Static<::pallet_revive::EthTransactInfo>" - ), - substitute_type( - path = "pallet_revive::primitives::EthTransactError", - with = 
"::subxt::utils::Static<::pallet_revive::EthTransactError>" - ), - substitute_type( - path = "pallet_revive::primitives::ExecReturnValue", - with = "::subxt::utils::Static<::pallet_revive::ExecReturnValue>" + path = "pallet_revive::primitives::EthContractResult", + with = "::subxt::utils::Static<::pallet_revive::EthContractResult>" ), substitute_type( path = "sp_weights::weight_v2::Weight", diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index e64e16d45b2a..01fcb6ae3bd2 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -18,23 +18,21 @@ use crate::{ cli::{self, CliCommand}, - example::{wait_for_successful_receipt, TransactionBuilder}, + example::{send_transaction, wait_for_receipt}, EthRpcClient, }; use clap::Parser; -use ethabi::Token; use jsonrpsee::ws_client::{WsClient, WsClientBuilder}; use pallet_revive::{ create1, - evm::{Account, BlockTag, U256}, + evm::{Account, BlockTag, Bytes, U256}, }; -use static_init::dynamic; use std::thread; use substrate_cli_test_utils::*; -/// Create a websocket client with a 120s timeout. +/// Create a websocket client with a 30s timeout. 
async fn ws_client_with_retry(url: &str) -> WsClient { - let timeout = tokio::time::Duration::from_secs(120); + let timeout = tokio::time::Duration::from_secs(30); tokio::time::timeout(timeout, async { loop { if let Ok(client) = WsClientBuilder::default().build(url).await { @@ -48,126 +46,56 @@ async fn ws_client_with_retry(url: &str) -> WsClient { .expect("Hit timeout") } -fn get_contract(name: &str) -> anyhow::Result<(Vec, ethabi::Contract)> { - let pvm_dir: std::path::PathBuf = "./examples/js/pvm".into(); - let abi_dir: std::path::PathBuf = "./examples/js/abi".into(); - let bytecode = std::fs::read(pvm_dir.join(format!("{}.polkavm", name)))?; - - let abi = std::fs::read(abi_dir.join(format!("{}.json", name)))?; - let contract = ethabi::Contract::load(abi.as_slice())?; - - Ok((bytecode, contract)) -} - -struct SharedResources { - _node_handle: std::thread::JoinHandle<()>, - _rpc_handle: std::thread::JoinHandle<()>, -} - -impl SharedResources { - fn start() -> Self { - // Start the node. - let _node_handle = thread::spawn(move || { - if let Err(e) = start_node_inline(vec![ - "--dev", - "--rpc-port=45789", - "--no-telemetry", - "--no-prometheus", - "-lerror,evm=debug,sc_rpc_server=info,runtime::revive=trace", - ]) { - panic!("Node exited with error: {e:?}"); - } - }); - - // Start the rpc server. - let args = CliCommand::parse_from([ +#[tokio::test] +async fn test_jsonrpsee_server() -> anyhow::Result<()> { + // Start the node. 
+ let _ = thread::spawn(move || { + if let Err(e) = start_node_inline(vec![ "--dev", - "--rpc-port=45788", - "--node-rpc-url=ws://localhost:45789", + "--rpc-port=45789", + "--no-telemetry", "--no-prometheus", - "-linfo,eth-rpc=debug", - ]); - - let _rpc_handle = thread::spawn(move || { - if let Err(e) = cli::run(args) { - panic!("eth-rpc exited with error: {e:?}"); - } - }); - - Self { _node_handle, _rpc_handle } - } - - async fn client() -> WsClient { - ws_client_with_retry("ws://localhost:45788").await - } -} - -#[dynamic(lazy)] -static mut SHARED_RESOURCES: SharedResources = SharedResources::start(); - -macro_rules! unwrap_call_err( - ($err:expr) => { - match $err.downcast_ref::().unwrap() { - jsonrpsee::core::client::Error::Call(call) => call, - _ => panic!("Expected Call error"), + "-lerror,evm=debug,sc_rpc_server=info,runtime::revive=debug", + ]) { + panic!("Node exited with error: {e:?}"); } - } -); - -#[tokio::test] -async fn transfer() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; - - let ethan = Account::from(subxt_signer::eth::dev::ethan()); - let initial_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; - - let value = 1_000_000_000_000_000_000_000u128.into(); - let hash = TransactionBuilder::default() - .value(value) - .to(ethan.address()) - .send(&client) - .await?; - - let receipt = wait_for_successful_receipt(&client, hash).await?; - assert_eq!( - Some(ethan.address()), - receipt.to, - "Receipt should have the correct contract address." - ); - - let increase = - client.get_balance(ethan.address(), BlockTag::Latest.into()).await? - initial_balance; - assert_eq!(value, increase); - Ok(()) -} + }); + + // Start the rpc server. 
+ let args = CliCommand::parse_from([ + "--dev", + "--rpc-port=45788", + "--node-rpc-url=ws://localhost:45789", + "--no-prometheus", + "-linfo,eth-rpc=debug", + ]); + let _ = thread::spawn(move || { + if let Err(e) = cli::run(args) { + panic!("eth-rpc exited with error: {e:?}"); + } + }); -#[tokio::test] -async fn deploy_and_call() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; + let client = ws_client_with_retry("ws://localhost:45788").await; let account = Account::default(); // Balance transfer let ethan = Account::from(subxt_signer::eth::dev::ethan()); - let initial_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + let ethan_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + assert_eq!(U256::zero(), ethan_balance); let value = 1_000_000_000_000_000_000_000u128.into(); - let hash = TransactionBuilder::default() - .value(value) - .to(ethan.address()) - .send(&client) - .await?; + let hash = + send_transaction(&account, &client, value, Bytes::default(), Some(ethan.address())).await?; - let receipt = wait_for_successful_receipt(&client, hash).await?; + let receipt = wait_for_receipt(&client, hash).await?; assert_eq!( Some(ethan.address()), receipt.to, "Receipt should have the correct contract address." 
); - let updated_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; - assert_eq!(value, updated_balance - initial_balance); + let ethan_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + assert_eq!(value, ethan_balance, "ethan's balance should be the same as the value sent."); // Deploy contract let data = b"hello world".to_vec(); @@ -175,8 +103,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?; let input = bytes.into_iter().chain(data.clone()).collect::>(); let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?; - let hash = TransactionBuilder::default().value(value).input(input).send(&client).await?; - let receipt = wait_for_successful_receipt(&client, hash).await?; + let hash = send_transaction(&account, &client, value, input.into(), None).await?; + let receipt = wait_for_receipt(&client, hash).await?; let contract_address = create1(&account.address(), nonce.try_into().unwrap()); assert_eq!( Some(contract_address), @@ -188,135 +116,15 @@ async fn deploy_and_call() -> anyhow::Result<()> { assert_eq!(value, balance, "Contract balance should be the same as the value sent."); // Call contract - let hash = TransactionBuilder::default() - .value(value) - .to(contract_address) - .send(&client) - .await?; - let receipt = wait_for_successful_receipt(&client, hash).await?; - + let hash = + send_transaction(&account, &client, U256::zero(), Bytes::default(), Some(contract_address)) + .await?; + let receipt = wait_for_receipt(&client, hash).await?; assert_eq!( Some(contract_address), receipt.to, "Receipt should have the correct contract address." ); - let increase = client.get_balance(contract_address, BlockTag::Latest.into()).await? 
- balance; - assert_eq!(value, increase, "contract's balance should have increased by the value sent."); - - // Balance transfer to contract - let balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; - let hash = TransactionBuilder::default() - .value(value) - .to(contract_address) - .send(&client) - .await?; - - wait_for_successful_receipt(&client, hash).await?; - let increase = client.get_balance(contract_address, BlockTag::Latest.into()).await? - balance; - assert_eq!(value, increase, "contract's balance should have increased by the value sent."); - Ok(()) -} - -#[tokio::test] -async fn revert_call() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("Errors")?; - let receipt = TransactionBuilder::default() - .input(bytecode) - .send_and_wait_for_receipt(&client) - .await?; - - let err = TransactionBuilder::default() - .to(receipt.contract_address.unwrap()) - .input(contract.function("triggerRequireError")?.encode_input(&[])?.to_vec()) - .send(&client) - .await - .unwrap_err(); - - let call_err = unwrap_call_err!(err.source().unwrap()); - assert_eq!(call_err.message(), "execution reverted: This is a require error"); - assert_eq!(call_err.code(), 3); - Ok(()) -} - -#[tokio::test] -async fn event_logs() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("EventExample")?; - let receipt = TransactionBuilder::default() - .input(bytecode) - .send_and_wait_for_receipt(&client) - .await?; - - let receipt = TransactionBuilder::default() - .to(receipt.contract_address.unwrap()) - .input(contract.function("triggerEvent")?.encode_input(&[])?.to_vec()) - .send_and_wait_for_receipt(&client) - .await?; - assert_eq!(receipt.logs.len(), 1, "There should be one log."); - Ok(()) -} - -#[tokio::test] -async fn invalid_transaction() -> anyhow::Result<()> 
{ - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; - let ethan = Account::from(subxt_signer::eth::dev::ethan()); - - let err = TransactionBuilder::default() - .value(U256::from(1_000_000_000_000u128)) - .to(ethan.address()) - .mutate(|tx| tx.chain_id = Some(42u32.into())) - .send(&client) - .await - .unwrap_err(); - - let call_err = unwrap_call_err!(err.source().unwrap()); - assert_eq!(call_err.message(), "Invalid Transaction"); - - Ok(()) -} - -#[tokio::test] -async fn native_evm_ratio_works() -> anyhow::Result<()> { - let _lock = SHARED_RESOURCES.write(); - let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("PiggyBank")?; - let contract_address = TransactionBuilder::default() - .input(bytecode) - .send_and_wait_for_receipt(&client) - .await? - .contract_address - .unwrap(); - - let value = 10_000_000_000_000_000_000u128; // 10 eth - TransactionBuilder::default() - .to(contract_address) - .input(contract.function("deposit")?.encode_input(&[])?.to_vec()) - .value(value.into()) - .send_and_wait_for_receipt(&client) - .await?; - - let contract_value = client.get_balance(contract_address, BlockTag::Latest.into()).await?; - assert_eq!(contract_value, value.into()); - - let withdraw_value = 1_000_000_000_000_000_000u128; // 1 eth - TransactionBuilder::default() - .to(contract_address) - .input( - contract - .function("withdraw")? - .encode_input(&[Token::Uint(withdraw_value.into())])? 
- .to_vec(), - ) - .send_and_wait_for_receipt(&client) - .await?; - - let contract_value = client.get_balance(contract_address, BlockTag::Latest.into()).await?; - assert_eq!(contract_value, (value - withdraw_value).into()); - Ok(()) } diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs index 1177d47aadc3..c666383abb2f 100644 --- a/substrate/frame/revive/src/benchmarking/call_builder.rs +++ b/substrate/frame/revive/src/benchmarking/call_builder.rs @@ -21,7 +21,7 @@ use crate::{ exec::{ExportedFunction, Ext, Key, Stack}, storage::meter::Meter, transient_storage::MeterEntry, - wasm::{PreparedCall, Runtime}, + wasm::{ApiVersion, PreparedCall, Runtime}, BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; @@ -45,7 +45,7 @@ pub struct CallSetup { impl Default for CallSetup where - T: Config, + T: Config + pallet_balances::Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -57,7 +57,7 @@ where impl CallSetup where - T: Config, + T: Config + pallet_balances::Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -164,7 +164,13 @@ where module: WasmBlob, input: Vec, ) -> PreparedCall<'a, StackExt<'a, T>> { - module.prepare_call(Runtime::new(ext, input), ExportedFunction::Call).unwrap() + module + .prepare_call( + Runtime::new(ext, input), + ExportedFunction::Call, + ApiVersion::UnsafeNewest, + ) + .unwrap() } /// Add transient_storage diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index e67c39ec0899..593c16cbb2d8 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -23,7 +23,6 @@ mod call_builder; mod code; use self::{call_builder::CallSetup, code::WasmModule}; use crate::{ - evm::runtime::GAS_PRICE, exec::{Key, MomentOf}, limits, 
storage::WriteOutcome, @@ -35,10 +34,11 @@ use frame_benchmarking::v2::*; use frame_support::{ self, assert_ok, storage::child, - traits::fungible::InspectHold, + traits::{fungible::InspectHold, Currency}, weights::{Weight, WeightMeter}, }; use frame_system::RawOrigin; +use pallet_balances; use pallet_revive_uapi::{CallFlags, ReturnErrorCode, StorageFlags}; use sp_runtime::traits::{Bounded, Hash}; @@ -68,7 +68,7 @@ struct Contract { impl Contract where - T: Config, + T: Config + pallet_balances::Config, BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, @@ -103,7 +103,7 @@ where origin, 0u32.into(), Weight::MAX, - DepositLimit::Balance(default_deposit_limit::()), + default_deposit_limit::(), Code::Upload(module.code), data, salt, @@ -220,10 +220,11 @@ fn default_deposit_limit() -> BalanceOf { #[benchmarks( where BalanceOf: Into + TryFrom, - T: Config, + T: Config + pallet_balances::Config, MomentOf: Into, ::RuntimeEvent: From>, ::RuntimeCall: From>, + as Currency>::Balance: From>, ::Hash: frame_support::traits::IsType, )] mod benchmarks { @@ -363,10 +364,10 @@ mod benchmarks { // We just call a dummy contract to measure the overhead of the call extrinsic. // The size of the data has no influence on the costs of this extrinsic as long as the contract - // won't call `seal_call_data_copy` in its constructor to copy the data to contract memory. + // won't call `seal_input` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as - // part of `seal_call_data_copy`. The costs for invoking a contract of a specific size are not - // part of this benchmark because we cannot know the size of the contract when issuing a call + // part of `seal_input`. The costs for invoking a contract of a specific size are not part + // of this benchmark because we cannot know the size of the contract when issuing a call // transaction. 
See `call_with_code_per_byte` for this. #[benchmark(pov_mode = Measured)] fn call() -> Result<(), BenchmarkError> { @@ -597,15 +598,19 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_code_size() { let contract = Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); - build_runtime!(runtime, memory: [contract.address.encode(),]); + build_runtime!(runtime, memory: [contract.address.encode(), vec![0u8; 32], ]); let result; #[block] { - result = runtime.bench_code_size(memory.as_mut_slice(), 0); + result = runtime.bench_code_size(memory.as_mut_slice(), 0, 20); } - assert_eq!(result.unwrap(), WasmModule::dummy().code.len() as u64); + assert_ok!(result); + assert_eq!( + U256::from_little_endian(&memory[20..]), + U256::from(WasmModule::dummy().code.len()) + ); } #[benchmark(pov_mode = Measured)] @@ -668,18 +673,6 @@ mod benchmarks { ); } - #[benchmark(pov_mode = Measured)] - fn seal_ref_time_left() { - build_runtime!(runtime, memory: [vec![], ]); - - let result; - #[block] - { - result = runtime.bench_ref_time_left(memory.as_mut_slice()); - } - assert_eq!(result.unwrap(), runtime.ext().gas_meter().gas_left().ref_time()); - } - #[benchmark(pov_mode = Measured)] fn seal_balance() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -780,70 +773,6 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().minimum_balance()); } - #[benchmark(pov_mode = Measured)] - fn seal_return_data_size() { - let mut setup = CallSetup::::default(); - let (mut ext, _) = setup.ext(); - let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let mut memory = memory!(vec![],); - *runtime.ext().last_frame_output_mut() = - ExecReturnValue { data: vec![42; 256], ..Default::default() }; - let result; - #[block] - { - result = runtime.bench_return_data_size(memory.as_mut_slice()); - } - assert_eq!(result.unwrap(), 256); - } - - #[benchmark(pov_mode = Measured)] - fn seal_call_data_size() { - let mut setup = CallSetup::::default(); - let (mut 
ext, _) = setup.ext(); - let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 128 as usize]); - let mut memory = memory!(vec![0u8; 4],); - let result; - #[block] - { - result = runtime.bench_call_data_size(memory.as_mut_slice()); - } - assert_eq!(result.unwrap(), 128); - } - - #[benchmark(pov_mode = Measured)] - fn seal_gas_limit() { - build_runtime!(runtime, memory: []); - let result; - #[block] - { - result = runtime.bench_gas_limit(&mut memory); - } - assert_eq!(result.unwrap(), T::BlockWeights::get().max_block.ref_time()); - } - - #[benchmark(pov_mode = Measured)] - fn seal_gas_price() { - build_runtime!(runtime, memory: []); - let result; - #[block] - { - result = runtime.bench_gas_price(memory.as_mut_slice()); - } - assert_eq!(result.unwrap(), u64::from(GAS_PRICE)); - } - - #[benchmark(pov_mode = Measured)] - fn seal_base_fee() { - build_runtime!(runtime, memory: [[1u8;32], ]); - let result; - #[block] - { - result = runtime.bench_base_fee(memory.as_mut_slice(), 0); - } - assert_ok!(result); - assert_eq!(U256::from_little_endian(&memory[..]), U256::zero()); - } - #[benchmark(pov_mode = Measured)] fn seal_block_number() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -912,56 +841,18 @@ mod benchmarks { } #[benchmark(pov_mode = Measured)] - fn seal_copy_to_contract(n: Linear<0, { limits::code::BLOB_BYTES - 4 }>) { - let mut setup = CallSetup::::default(); - let (mut ext, _) = setup.ext(); - let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let mut memory = memory!(n.encode(), vec![0u8; n as usize],); - let result; - #[block] - { - result = runtime.write_sandbox_output( - memory.as_mut_slice(), - 4, - 0, - &vec![42u8; n as usize], - false, - |_| None, - ); - } - assert_ok!(result); - assert_eq!(&memory[..4], &n.encode()); - assert_eq!(&memory[4..], &vec![42u8; n as usize]); - } - - #[benchmark(pov_mode = Measured)] - fn seal_call_data_load() { - let mut setup = CallSetup::::default(); - let (mut ext, _) = setup.ext(); - let mut 
runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 32]); - let mut memory = memory!(vec![0u8; 32],); - let result; - #[block] - { - result = runtime.bench_call_data_load(memory.as_mut_slice(), 0, 0); - } - assert_ok!(result); - assert_eq!(&memory[..], &vec![42u8; 32]); - } - - #[benchmark(pov_mode = Measured)] - fn seal_call_data_copy(n: Linear<0, { limits::code::BLOB_BYTES }>) { + fn seal_input(n: Linear<0, { limits::code::BLOB_BYTES - 4 }>) { let mut setup = CallSetup::::default(); let (mut ext, _) = setup.ext(); let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]); - let mut memory = memory!(vec![0u8; n as usize],); + let mut memory = memory!(n.to_le_bytes(), vec![0u8; n as usize],); let result; #[block] { - result = runtime.bench_call_data_copy(memory.as_mut_slice(), 0, n, 0); + result = runtime.bench_input(memory.as_mut_slice(), 4, 0); } assert_ok!(result); - assert_eq!(&memory[..], &vec![42u8; n as usize]); + assert_eq!(&memory[4..], &vec![42u8; n as usize]); } #[benchmark(pov_mode = Measured)] @@ -1625,7 +1516,7 @@ mod benchmarks { let callee_bytes = callee.encode(); let callee_len = callee_bytes.len() as u32; - let value: BalanceOf = (1_000_000 * t).into(); + let value: BalanceOf = t.into(); let value_bytes = Into::::into(value).encode(); let deposit: BalanceOf = (u32::MAX - 100).into(); @@ -1664,36 +1555,25 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_delegate_call() -> Result<(), BenchmarkError> { - let Contract { account_id: address, .. 
} = - Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); - - let address_bytes = address.encode(); - let address_len = address_bytes.len() as u32; - - let deposit: BalanceOf = (u32::MAX - 100).into(); - let deposit_bytes = Into::::into(deposit).encode(); + let hash = Contract::::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash; let mut setup = CallSetup::::default(); - setup.set_storage_deposit_limit(deposit); setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone())); let (mut ext, _) = setup.ext(); let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, vec![]); - let mut memory = memory!(address_bytes, deposit_bytes,); + let mut memory = memory!(hash.encode(),); let result; #[block] { result = runtime.bench_delegate_call( memory.as_mut_slice(), - 0, // flags - 0, // address_ptr - 0, // ref_time_limit - 0, // proof_size_limit - address_len, // deposit_ptr - 0, // input_data_ptr - 0, // input_data_len - SENTINEL, // output_ptr + 0, // flags + 0, // code_hash_ptr + 0, // input_data_ptr + 0, // input_data_len + SENTINEL, // output_ptr 0, ); } @@ -1711,7 +1591,7 @@ mod benchmarks { let hash_bytes = hash.encode(); let hash_len = hash_bytes.len() as u32; - let value: BalanceOf = 1_000_000u32.into(); + let value: BalanceOf = 1u32.into(); let value_bytes = Into::::into(value).encode(); let value_len = value_bytes.len() as u32; @@ -1765,10 +1645,7 @@ mod benchmarks { assert_ok!(result); assert!(ContractInfoOf::::get(&addr).is_some()); - assert_eq!( - T::Currency::balance(&account_id), - Pallet::::min_balance() + Pallet::::convert_evm_to_native(value.into()).unwrap() - ); + assert_eq!(T::Currency::balance(&account_id), Pallet::::min_balance() + value); Ok(()) } diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index 5b3e886a5628..ccea12945054 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 
+75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::Decode; +use codec::{Decode, MaxEncodedLen}; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,6 +304,16 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { Ok(()) } + /// Reads and decodes a type with a size fixed at compile time from contract memory. + /// + /// This function is secure and recommended for all input types of fixed size + /// as long as the cost of reading the memory is included in the overall already charged + /// weight of the chain extension. This should usually be the case when fixed input types + /// are used. + pub fn read_as(&mut self) -> Result { + self.memory.read_as(self.input_ptr) + } + /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/evm/api/account.rs b/substrate/frame/revive/src/evm/api/account.rs index ba1c68ea0cf7..06fb6e7e9c21 100644 --- a/substrate/frame/revive/src/evm/api/account.rs +++ b/substrate/frame/revive/src/evm/api/account.rs @@ -16,9 +16,10 @@ // limitations under the License. //! Utilities for working with Ethereum accounts. use crate::{ - evm::{TransactionSigned, TransactionUnsigned}, + evm::{TransactionLegacySigned, TransactionLegacyUnsigned}, H160, }; +use rlp::Encodable; use sp_runtime::AccountId32; /// A simple account that can sign transactions @@ -37,14 +38,9 @@ impl From for Account { } impl Account { - /// Create a new account from a secret - pub fn from_secret_key(secret_key: [u8; 32]) -> Self { - subxt_signer::eth::Keypair::from_secret_key(secret_key).unwrap().into() - } - /// Get the [`H160`] address of the account. pub fn address(&self) -> H160 { - H160::from_slice(&self.0.public_key().to_account_id().as_ref()) + H160::from_slice(&self.0.account_id().as_ref()) } /// Get the substrate [`AccountId32`] of the account. @@ -56,21 +52,9 @@ impl Account { } /// Sign a transaction. 
- pub fn sign_transaction(&self, tx: TransactionUnsigned) -> TransactionSigned { - let payload = tx.unsigned_payload(); - let signature = self.0.sign(&payload).0; - tx.with_signature(signature) + pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned { + let rlp_encoded = tx.rlp_bytes(); + let signature = self.0.sign(&rlp_encoded); + TransactionLegacySigned::from(tx, signature.as_ref()) } } - -#[test] -fn from_secret_key_works() { - let account = Account::from_secret_key(hex_literal::hex!( - "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" - )); - - assert_eq!( - account.address(), - H160::from(hex_literal::hex!("75e480db528101a381ce68544611c169ad7eb342")) - ) -} diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs index 9b61cd042ec5..e5f24c28a482 100644 --- a/substrate/frame/revive/src/evm/api/rlp_codec.rs +++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs @@ -21,81 +21,14 @@ use super::*; use alloc::vec::Vec; use rlp::{Decodable, Encodable}; -impl TransactionUnsigned { - /// Return the bytes to be signed by the private key. - pub fn unsigned_payload(&self) -> Vec { - use TransactionUnsigned::*; - let mut s = rlp::RlpStream::new(); - match self { - Transaction2930Unsigned(ref tx) => { - s.append(&tx.r#type.value()); - s.append(tx); - }, - Transaction1559Unsigned(ref tx) => { - s.append(&tx.r#type.value()); - s.append(tx); - }, - Transaction4844Unsigned(ref tx) => { - s.append(&tx.r#type.value()); - s.append(tx); - }, - TransactionLegacyUnsigned(ref tx) => { - s.append(tx); - }, - } - - s.out().to_vec() - } -} - -impl TransactionSigned { - /// Encode the Ethereum transaction into bytes. 
- pub fn signed_payload(&self) -> Vec { - use TransactionSigned::*; - let mut s = rlp::RlpStream::new(); - match self { - Transaction2930Signed(ref tx) => { - s.append(&tx.transaction_2930_unsigned.r#type.value()); - s.append(tx); - }, - Transaction1559Signed(ref tx) => { - s.append(&tx.transaction_1559_unsigned.r#type.value()); - s.append(tx); - }, - Transaction4844Signed(ref tx) => { - s.append(&tx.transaction_4844_unsigned.r#type.value()); - s.append(tx); - }, - TransactionLegacySigned(ref tx) => { - s.append(tx); - }, - } - - s.out().to_vec() - } - - /// Decode the Ethereum transaction from bytes. - pub fn decode(data: &[u8]) -> Result { - if data.len() < 1 { - return Err(rlp::DecoderError::RlpIsTooShort); - } - match data[0] { - TYPE_EIP2930 => rlp::decode::(&data[1..]).map(Into::into), - TYPE_EIP1559 => rlp::decode::(&data[1..]).map(Into::into), - TYPE_EIP4844 => rlp::decode::(&data[1..]).map(Into::into), - _ => rlp::decode::(data).map(Into::into), - } - } -} - -impl TransactionUnsigned { - /// Get a signed transaction payload with a dummy 65 bytes signature. +impl TransactionLegacyUnsigned { + /// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature. 
pub fn dummy_signed_payload(&self) -> Vec { + let mut s = rlp::RlpStream::new(); + s.append(self); const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; - self.unsigned_payload() - .into_iter() - .chain(DUMMY_SIGNATURE.iter().copied()) - .collect::>() + s.append_raw(&DUMMY_SIGNATURE.as_ref(), 1); + s.out().to_vec() } } @@ -114,8 +47,8 @@ impl Encodable for TransactionLegacyUnsigned { s.append(&self.value); s.append(&self.input.0); s.append(&chain_id); - s.append(&0u8); - s.append(&0u8); + s.append(&0_u8); + s.append(&0_u8); } else { s.begin_list(6); s.append(&self.nonce); @@ -131,6 +64,7 @@ impl Encodable for TransactionLegacyUnsigned { } } +/// See impl Decodable for TransactionLegacyUnsigned { fn decode(rlp: &rlp::Rlp) -> Result { Ok(TransactionLegacyUnsigned { @@ -161,18 +95,16 @@ impl Decodable for TransactionLegacyUnsigned { impl Encodable for TransactionLegacySigned { fn rlp_append(&self, s: &mut rlp::RlpStream) { - let tx = &self.transaction_legacy_unsigned; - s.begin_list(9); - s.append(&tx.nonce); - s.append(&tx.gas_price); - s.append(&tx.gas); - match tx.to { + s.append(&self.transaction_legacy_unsigned.nonce); + s.append(&self.transaction_legacy_unsigned.gas_price); + s.append(&self.transaction_legacy_unsigned.gas); + match self.transaction_legacy_unsigned.to { Some(ref to) => s.append(to), None => s.append_empty_data(), }; - s.append(&tx.value); - s.append(&tx.input.0); + s.append(&self.transaction_legacy_unsigned.value); + s.append(&self.transaction_legacy_unsigned.input.0); s.append(&self.v); s.append(&self.r); @@ -180,232 +112,6 @@ impl Encodable for TransactionLegacySigned { } } -impl Encodable for AccessListEntry { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - s.begin_list(2); - s.append(&self.address); - s.append_list(&self.storage_keys); - } -} - -impl Decodable for AccessListEntry { - fn decode(rlp: &rlp::Rlp) -> Result { - Ok(AccessListEntry { address: rlp.val_at(0)?, storage_keys: rlp.list_at(1)? 
}) - } -} - -/// See -impl Encodable for Transaction1559Unsigned { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - s.begin_list(9); - s.append(&self.chain_id); - s.append(&self.nonce); - s.append(&self.max_priority_fee_per_gas); - s.append(&self.max_fee_per_gas); - s.append(&self.gas); - match self.to { - Some(ref to) => s.append(to), - None => s.append_empty_data(), - }; - s.append(&self.value); - s.append(&self.input.0); - s.append_list(&self.access_list); - } -} - -/// See -impl Encodable for Transaction1559Signed { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - let tx = &self.transaction_1559_unsigned; - s.begin_list(12); - s.append(&tx.chain_id); - s.append(&tx.nonce); - s.append(&tx.max_priority_fee_per_gas); - s.append(&tx.max_fee_per_gas); - s.append(&tx.gas); - match tx.to { - Some(ref to) => s.append(to), - None => s.append_empty_data(), - }; - s.append(&tx.value); - s.append(&tx.input.0); - s.append_list(&tx.access_list); - - s.append(&self.y_parity); - s.append(&self.r); - s.append(&self.s); - } -} - -impl Decodable for Transaction1559Signed { - fn decode(rlp: &rlp::Rlp) -> Result { - Ok(Transaction1559Signed { - transaction_1559_unsigned: { - Transaction1559Unsigned { - chain_id: rlp.val_at(0)?, - nonce: rlp.val_at(1)?, - max_priority_fee_per_gas: rlp.val_at(2)?, - max_fee_per_gas: rlp.val_at(3)?, - gas: rlp.val_at(4)?, - to: { - let to = rlp.at(5)?; - if to.is_empty() { - None - } else { - Some(to.as_val()?) 
- } - }, - value: rlp.val_at(6)?, - input: Bytes(rlp.val_at(7)?), - access_list: rlp.list_at(8)?, - ..Default::default() - } - }, - y_parity: rlp.val_at(9)?, - r: rlp.val_at(10)?, - s: rlp.val_at(11)?, - ..Default::default() - }) - } -} - -//See https://eips.ethereum.org/EIPS/eip-2930 -impl Encodable for Transaction2930Unsigned { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - s.begin_list(8); - s.append(&self.chain_id); - s.append(&self.nonce); - s.append(&self.gas_price); - s.append(&self.gas); - match self.to { - Some(ref to) => s.append(to), - None => s.append_empty_data(), - }; - s.append(&self.value); - s.append(&self.input.0); - s.append_list(&self.access_list); - } -} - -//See https://eips.ethereum.org/EIPS/eip-2930 -impl Encodable for Transaction2930Signed { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - let tx = &self.transaction_2930_unsigned; - s.begin_list(11); - s.append(&tx.chain_id); - s.append(&tx.nonce); - s.append(&tx.gas_price); - s.append(&tx.gas); - match tx.to { - Some(ref to) => s.append(to), - None => s.append_empty_data(), - }; - s.append(&tx.value); - s.append(&tx.input.0); - s.append_list(&tx.access_list); - s.append(&self.y_parity); - s.append(&self.r); - s.append(&self.s); - } -} - -impl Decodable for Transaction2930Signed { - fn decode(rlp: &rlp::Rlp) -> Result { - Ok(Transaction2930Signed { - transaction_2930_unsigned: { - Transaction2930Unsigned { - chain_id: rlp.val_at(0)?, - nonce: rlp.val_at(1)?, - gas_price: rlp.val_at(2)?, - gas: rlp.val_at(3)?, - to: { - let to = rlp.at(4)?; - if to.is_empty() { - None - } else { - Some(to.as_val()?) 
- } - }, - value: rlp.val_at(5)?, - input: Bytes(rlp.val_at(6)?), - access_list: rlp.list_at(7)?, - ..Default::default() - } - }, - y_parity: rlp.val_at(8)?, - r: rlp.val_at(9)?, - s: rlp.val_at(10)?, - ..Default::default() - }) - } -} - -//See https://eips.ethereum.org/EIPS/eip-4844 -impl Encodable for Transaction4844Unsigned { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - s.begin_list(11); - s.append(&self.chain_id); - s.append(&self.nonce); - s.append(&self.max_priority_fee_per_gas); - s.append(&self.max_fee_per_gas); - s.append(&self.gas); - s.append(&self.to); - s.append(&self.value); - s.append(&self.input.0); - s.append_list(&self.access_list); - s.append(&self.max_fee_per_blob_gas); - s.append_list(&self.blob_versioned_hashes); - } -} - -//See https://eips.ethereum.org/EIPS/eip-4844 -impl Encodable for Transaction4844Signed { - fn rlp_append(&self, s: &mut rlp::RlpStream) { - let tx = &self.transaction_4844_unsigned; - s.begin_list(14); - s.append(&tx.chain_id); - s.append(&tx.nonce); - s.append(&tx.max_priority_fee_per_gas); - s.append(&tx.max_fee_per_gas); - s.append(&tx.gas); - s.append(&tx.to); - s.append(&tx.value); - s.append(&tx.input.0); - s.append_list(&tx.access_list); - s.append(&tx.max_fee_per_blob_gas); - s.append_list(&tx.blob_versioned_hashes); - s.append(&self.y_parity); - s.append(&self.r); - s.append(&self.s); - } -} - -impl Decodable for Transaction4844Signed { - fn decode(rlp: &rlp::Rlp) -> Result { - Ok(Transaction4844Signed { - transaction_4844_unsigned: { - Transaction4844Unsigned { - chain_id: rlp.val_at(0)?, - nonce: rlp.val_at(1)?, - max_priority_fee_per_gas: rlp.val_at(2)?, - max_fee_per_gas: rlp.val_at(3)?, - gas: rlp.val_at(4)?, - to: rlp.val_at(5)?, - value: rlp.val_at(6)?, - input: Bytes(rlp.val_at(7)?), - access_list: rlp.list_at(8)?, - max_fee_per_blob_gas: rlp.val_at(9)?, - blob_versioned_hashes: rlp.list_at(10)?, - ..Default::default() - } - }, - y_parity: rlp.val_at(11)?, - r: rlp.val_at(12)?, - s: rlp.val_at(13)?, - 
}) - } -} - /// See impl Decodable for TransactionLegacySigned { fn decode(rlp: &rlp::Rlp) -> Result { @@ -436,7 +142,7 @@ impl Decodable for TransactionLegacySigned { value: rlp.val_at(4)?, input: Bytes(rlp.val_at(5)?), chain_id: extract_chain_id(v).map(|v| v.into()), - r#type: TypeLegacy {}, + r#type: Type0 {}, } }, v, @@ -451,123 +157,31 @@ mod test { use super::*; #[test] - fn encode_decode_tx_works() { - let txs = [ - // Legacy - ( - "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808025a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - r#" - { - "chainId": "0x1", - "gas": "0x1e241", - "gasPrice": "0x0", - "input": "0x", - "nonce": "0x0", - "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", - "type": "0x0", - "value": "0x0", - "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", - "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - "v": "0x25" - } - "# - ), - // type 1: EIP2930 - ( - "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - r#" - { - "accessList": [ - { - "address": "0x0000000000000000000000000000000000000001", - "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] - } - ], - "chainId": "0x1", - "gas": "0x1e241", - "gasPrice": "0x0", - "input": "0x", - "nonce": "0x0", - "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", - "type": "0x1", - "value": "0x0", - "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", - "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - "yParity": "0x0" - } - "# - ), - // type 2: EIP1559 - ( - 
"02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - r#" - { - "accessList": [ - { - "address": "0x0000000000000000000000000000000000000001", - "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] - } - ], - "chainId": "0x1", - "gas": "0x1e241", - "gasPrice": "0x0", - "input": "0x", - "maxFeePerGas": "0x1", - "maxPriorityFeePerGas": "0x0", - "nonce": "0x0", - "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", - "type": "0x2", - "value": "0x0", - "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", - "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - "yParity": "0x0" - - } - "# - ), - // type 3: EIP4844 - ( + fn encode_decode_legacy_transaction_works() { + let tx = TransactionLegacyUnsigned { + chain_id: Some(596.into()), + gas: U256::from(21000), + nonce: U256::from(1), + gas_price: U256::from("0x640000006a"), + to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + value: U256::from(123123), + input: Bytes(vec![]), + r#type: Type0, + }; - "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - r#" - { - "accessList": [ - { - "address": "0x0000000000000000000000000000000000000001", - "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] - } - ], - "blobVersionedHashes": 
["0x0000000000000000000000000000000000000000000000000000000000000000"], - "chainId": "0x1", - "gas": "0x1e241", - "input": "0x", - "maxFeePerBlobGas": "0x0", - "maxFeePerGas": "0x1", - "maxPriorityFeePerGas": "0x2", - "nonce": "0x0", - "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", - "type": "0x3", - "value": "0x0", - "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", - "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", - "yParity": "0x0" - } - "# - ) - ]; + let rlp_bytes = rlp::encode(&tx); + let decoded = rlp::decode::(&rlp_bytes).unwrap(); + assert_eq!(&tx, &decoded); - for (tx, json) in txs { - let raw_tx = hex::decode(tx).unwrap(); - let tx = TransactionSigned::decode(&raw_tx).unwrap(); - assert_eq!(tx.signed_payload(), raw_tx); - let expected_tx = serde_json::from_str(json).unwrap(); - assert_eq!(tx, expected_tx); - } + let tx = Account::default().sign_transaction(tx); + let rlp_bytes = rlp::encode(&tx); + let decoded = rlp::decode::(&rlp_bytes).unwrap(); + assert_eq!(&tx, &decoded); } #[test] fn dummy_signed_payload_works() { - let tx: TransactionUnsigned = TransactionLegacyUnsigned { + let tx = TransactionLegacyUnsigned { chain_id: Some(596.into()), gas: U256::from(21000), nonce: U256::from(1), @@ -575,12 +189,31 @@ mod test { to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), value: U256::from(123123), input: Bytes(vec![]), - r#type: TypeLegacy, - } - .into(); + r#type: Type0, + }; + + let signed_tx = Account::default().sign_transaction(tx.clone()); + let rlp_bytes = rlp::encode(&signed_tx); + assert_eq!(tx.dummy_signed_payload().len(), rlp_bytes.len()); + } + + #[test] + fn recover_address_works() { + let account = Account::default(); + + let unsigned_tx = TransactionLegacyUnsigned { + value: 200_000_000_000_000_000_000u128.into(), + gas_price: 100_000_000_200u64.into(), + gas: 100_107u32.into(), + nonce: 3.into(), + to: 
Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + chain_id: Some(596.into()), + ..Default::default() + }; + + let tx = account.sign_transaction(unsigned_tx.clone()); + let recovered_address = tx.recover_eth_address().unwrap(); - let dummy_signed_payload = tx.dummy_signed_payload(); - let payload = Account::default().sign_transaction(tx).signed_payload(); - assert_eq!(dummy_signed_payload.len(), payload.len()); + assert_eq!(account.address(), recovered_address); } } diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index ed046cb4da44..b15a0a53cd07 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -15,30 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. //! Utility impl for the RPC types. -use super::*; -use alloc::vec::Vec; -use sp_core::{H160, U256}; - -impl From for BlockNumberOrTagOrHash { - fn from(b: BlockNumberOrTag) -> Self { - match b { - BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n), - BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t), - } - } -} - -impl From for TransactionUnsigned { - fn from(tx: TransactionSigned) -> Self { - use TransactionSigned::*; - match tx { - Transaction4844Signed(tx) => tx.transaction_4844_unsigned.into(), - Transaction1559Signed(tx) => tx.transaction_1559_unsigned.into(), - Transaction2930Signed(tx) => tx.transaction_2930_unsigned.into(), - TransactionLegacySigned(tx) => tx.transaction_legacy_unsigned.into(), - } - } -} +use super::{ReceiptInfo, TransactionInfo, TransactionSigned}; impl TransactionInfo { /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. @@ -53,240 +30,3 @@ impl TransactionInfo { } } } - -impl ReceiptInfo { - /// Initialize a new Receipt - pub fn new( - block_hash: H256, - block_number: U256, - contract_address: Option

, - from: Address, - logs: Vec, - to: Option
, - effective_gas_price: U256, - gas_used: U256, - success: bool, - transaction_hash: H256, - transaction_index: U256, - r#type: Byte, - ) -> Self { - let logs_bloom = Self::logs_bloom(&logs); - ReceiptInfo { - block_hash, - block_number, - contract_address, - from, - logs, - logs_bloom, - to, - effective_gas_price, - gas_used, - status: Some(if success { U256::one() } else { U256::zero() }), - transaction_hash, - transaction_index, - r#type: Some(r#type), - ..Default::default() - } - } - - /// Returns `true` if the transaction was successful. - pub fn is_success(&self) -> bool { - self.status.map_or(false, |status| status == U256::one()) - } - - /// Calculate receipt logs bloom. - fn logs_bloom(logs: &[Log]) -> Bytes256 { - let mut bloom = [0u8; 256]; - for log in logs { - m3_2048(&mut bloom, &log.address.as_ref()); - for topic in &log.topics { - m3_2048(&mut bloom, topic.as_ref()); - } - } - bloom.into() - } -} -/// Specialised Bloom filter that sets three bits out of 2048, given an -/// arbitrary byte sequence. -/// -/// See Section 4.4.1 "Transaction Receipt" of the [Ethereum Yellow Paper][ref]. 
-/// -/// [ref]: https://ethereum.github.io/yellowpaper/paper.pdf -fn m3_2048(bloom: &mut [u8; 256], bytes: &[u8]) { - let hash = sp_core::keccak_256(bytes); - for i in [0, 2, 4] { - let bit = (hash[i + 1] as usize + ((hash[i] as usize) << 8)) & 0x7FF; - bloom[256 - 1 - bit / 8] |= 1 << (bit % 8); - } -} - -#[test] -fn logs_bloom_works() { - let receipt: ReceiptInfo = serde_json::from_str( - r#" - { - "blockHash": "0x835ee379aaabf4802a22a93ad8164c02bbdde2cc03d4552d5c642faf4e09d1f3", - "blockNumber": "0x2", - "contractAddress": null, - "cumulativeGasUsed": "0x5d92", - "effectiveGasPrice": "0x2dcd5c2d", - "from": "0xb4f1f9ecfe5a28633a27f57300bda217e99b8969", - "gasUsed": "0x5d92", - "logs": [ - { - "address": "0x82bdb002b9b1f36c42df15fbdc6886abcb2ab31d", - "topics": [ - "0x1585375487296ff2f0370daeec4214074a032b31af827c12622fa9a58c16c7d0", - "0x000000000000000000000000b4f1f9ecfe5a28633a27f57300bda217e99b8969" - ], - "data": "0x00000000000000000000000000000000000000000000000000000000000030390000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b48656c6c6f20776f726c64000000000000000000000000000000000000000000", - "blockNumber": "0x2", - "transactionHash": "0xad0075127962bdf73d787f2944bdb5f351876f23c35e6a48c1f5b6463a100af4", - "transactionIndex": "0x0", - "blockHash": "0x835ee379aaabf4802a22a93ad8164c02bbdde2cc03d4552d5c642faf4e09d1f3", - "logIndex": "0x0", - "removed": false - } - ], - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000008000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000004000000000000000800000000000000000080000000000000000000000000000000000000000000", - "status": "0x1", - "to": "0x82bdb002b9b1f36c42df15fbdc6886abcb2ab31d", - "transactionHash": "0xad0075127962bdf73d787f2944bdb5f351876f23c35e6a48c1f5b6463a100af4", - "transactionIndex": "0x0", - "type": "0x2" - } - "#, - ) - .unwrap(); - assert_eq!(receipt.logs_bloom, ReceiptInfo::logs_bloom(&receipt.logs)); -} - -impl GenericTransaction { - /// Create a new [`GenericTransaction`] from a signed transaction. - pub fn from_signed(tx: TransactionSigned, from: Option) -> Self { - Self::from_unsigned(tx.into(), from) - } - - /// Create a new [`GenericTransaction`] from a unsigned transaction. 
- pub fn from_unsigned(tx: TransactionUnsigned, from: Option) -> Self { - use TransactionUnsigned::*; - match tx { - TransactionLegacyUnsigned(tx) => GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: tx.chain_id, - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - ..Default::default() - }, - Transaction4844Unsigned(tx) => GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: Some(tx.to), - gas: Some(tx.gas), - gas_price: Some(tx.max_fee_per_blob_gas), - access_list: Some(tx.access_list), - blob_versioned_hashes: tx.blob_versioned_hashes, - max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), - max_fee_per_gas: Some(tx.max_fee_per_gas), - max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), - ..Default::default() - }, - Transaction1559Unsigned(tx) => GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - access_list: Some(tx.access_list), - max_fee_per_gas: Some(tx.max_fee_per_gas), - max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), - ..Default::default() - }, - Transaction2930Unsigned(tx) => GenericTransaction { - from, - r#type: Some(tx.r#type.as_byte()), - chain_id: Some(tx.chain_id), - input: Some(tx.input), - nonce: Some(tx.nonce), - value: Some(tx.value), - to: tx.to, - gas: Some(tx.gas), - gas_price: Some(tx.gas_price), - access_list: Some(tx.access_list), - ..Default::default() - }, - } - } - - /// Convert to a [`TransactionUnsigned`]. 
- pub fn try_into_unsigned(self) -> Result { - match self.r#type.unwrap_or_default().0 { - TYPE_LEGACY => Ok(TransactionLegacyUnsigned { - r#type: TypeLegacy {}, - chain_id: self.chain_id, - input: self.input.unwrap_or_default(), - nonce: self.nonce.unwrap_or_default(), - value: self.value.unwrap_or_default(), - to: self.to, - gas: self.gas.unwrap_or_default(), - gas_price: self.gas_price.unwrap_or_default(), - } - .into()), - TYPE_EIP1559 => Ok(Transaction1559Unsigned { - r#type: TypeEip1559 {}, - chain_id: self.chain_id.unwrap_or_default(), - input: self.input.unwrap_or_default(), - nonce: self.nonce.unwrap_or_default(), - value: self.value.unwrap_or_default(), - to: self.to, - gas: self.gas.unwrap_or_default(), - gas_price: self.gas_price.unwrap_or_default(), - access_list: self.access_list.unwrap_or_default(), - max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), - max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), - } - .into()), - TYPE_EIP2930 => Ok(Transaction2930Unsigned { - r#type: TypeEip2930 {}, - chain_id: self.chain_id.unwrap_or_default(), - input: self.input.unwrap_or_default(), - nonce: self.nonce.unwrap_or_default(), - value: self.value.unwrap_or_default(), - to: self.to, - gas: self.gas.unwrap_or_default(), - gas_price: self.gas_price.unwrap_or_default(), - access_list: self.access_list.unwrap_or_default(), - } - .into()), - TYPE_EIP4844 => Ok(Transaction4844Unsigned { - r#type: TypeEip4844 {}, - chain_id: self.chain_id.unwrap_or_default(), - input: self.input.unwrap_or_default(), - nonce: self.nonce.unwrap_or_default(), - value: self.value.unwrap_or_default(), - to: self.to.unwrap_or_default(), - gas: self.gas.unwrap_or_default(), - max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), - max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(), - max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), - access_list: self.access_list.unwrap_or_default(), - blob_versioned_hashes: 
self.blob_versioned_hashes, - } - .into()), - _ => Err(()), - } - } -} diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 1d65fdefdde6..1f391ae846a5 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -17,7 +17,7 @@ //! Generated JSON-RPC types. #![allow(missing_docs)] -use super::{byte::*, TypeEip1559, TypeEip2930, TypeEip4844, TypeLegacy}; +use super::{byte::*, Type0, Type1, Type2, Type3}; use alloc::vec::Vec; use codec::{Decode, Encode}; use derive_more::{From, TryInto}; @@ -94,8 +94,8 @@ pub struct Block { /// Uncles pub uncles: Vec, /// Withdrawals - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub withdrawals: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub withdrawals: Option>, /// Withdrawals root #[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")] pub withdrawals_root: Option, @@ -114,7 +114,7 @@ pub enum BlockNumberOrTag { } impl Default for BlockNumberOrTag { fn default() -> Self { - BlockNumberOrTag::BlockTag(Default::default()) + BlockNumberOrTag::U256(Default::default()) } } @@ -133,7 +133,7 @@ pub enum BlockNumberOrTagOrHash { } impl Default for BlockNumberOrTagOrHash { fn default() -> Self { - BlockNumberOrTagOrHash::BlockTag(Default::default()) + BlockNumberOrTagOrHash::U256(Default::default()) } } @@ -148,12 +148,12 @@ pub struct GenericTransaction { pub access_list: Option, /// blobVersionedHashes /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. - #[serde(rename = "blobVersionedHashes", default, skip_serializing_if = "Vec::is_empty")] - pub blob_versioned_hashes: Vec, + #[serde(rename = "blobVersionedHashes", skip_serializing_if = "Option::is_none")] + pub blob_versioned_hashes: Option>, /// blobs /// Raw blob data. 
- #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub blobs: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub blobs: Option>, /// chainId /// Chain ID that this transaction is valid on. #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] @@ -319,7 +319,7 @@ pub enum TransactionUnsigned { } impl Default for TransactionUnsigned { fn default() -> Self { - TransactionUnsigned::TransactionLegacyUnsigned(Default::default()) + TransactionUnsigned::Transaction4844Unsigned(Default::default()) } } @@ -341,13 +341,13 @@ pub type AccessList = Vec; )] pub enum BlockTag { #[serde(rename = "earliest")] + #[default] Earliest, #[serde(rename = "finalized")] Finalized, #[serde(rename = "safe")] Safe, #[serde(rename = "latest")] - #[default] Latest, #[serde(rename = "pending")] Pending, @@ -375,7 +375,8 @@ impl Default for H256OrTransactionInfo { )] pub struct Log { /// address - pub address: Address, + #[serde(skip_serializing_if = "Option::is_none")] + pub address: Option
, /// block hash #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] pub block_hash: Option, @@ -392,8 +393,8 @@ pub struct Log { #[serde(skip_serializing_if = "Option::is_none")] pub removed: Option, /// topics - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub topics: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub topics: Option>, /// transaction hash #[serde(rename = "transactionHash")] pub transaction_hash: H256, @@ -455,7 +456,7 @@ pub struct Transaction1559Unsigned { /// to address pub to: Option
, /// type - pub r#type: TypeEip1559, + pub r#type: Type2, /// value pub value: U256, } @@ -486,7 +487,7 @@ pub struct Transaction2930Unsigned { /// to address pub to: Option
, /// type - pub r#type: TypeEip2930, + pub r#type: Type1, /// value pub value: U256, } @@ -530,7 +531,7 @@ pub struct Transaction4844Unsigned { /// to address pub to: Address, /// type - pub r#type: TypeEip4844, + pub r#type: Type3, /// value pub value: U256, } @@ -557,7 +558,7 @@ pub struct TransactionLegacyUnsigned { /// to address pub to: Option
, /// type - pub r#type: TypeLegacy, + pub r#type: Type0, /// value pub value: U256, } @@ -574,7 +575,7 @@ pub enum TransactionSigned { } impl Default for TransactionSigned { fn default() -> Self { - TransactionSigned::TransactionLegacySigned(Default::default()) + TransactionSigned::Transaction4844Signed(Default::default()) } } @@ -622,8 +623,8 @@ pub struct Transaction1559Signed { pub v: Option, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity")] - pub y_parity: U256, + #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] + pub y_parity: Option, } /// Signed 2930 Transaction @@ -661,8 +662,8 @@ pub struct Transaction4844Signed { pub s: U256, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity")] - pub y_parity: U256, + #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] + pub y_parity: Option, } /// Signed Legacy Transaction diff --git a/substrate/frame/revive/src/evm/api/signature.rs b/substrate/frame/revive/src/evm/api/signature.rs index 9f39b92b461e..957d50c8e324 100644 --- a/substrate/frame/revive/src/evm/api/signature.rs +++ b/substrate/frame/revive/src/evm/api/signature.rs @@ -15,11 +15,49 @@ // See the License for the specific language governing permissions and // limitations under the License. //! Ethereum signature utilities -use super::*; +use super::{TransactionLegacySigned, TransactionLegacyUnsigned}; +use rlp::Encodable; use sp_core::{H160, U256}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; +impl TransactionLegacyUnsigned { + /// Recover the Ethereum address, from an RLP encoded transaction and a 65 bytes signature. 
+ pub fn recover_eth_address(rlp_encoded: &[u8], signature: &[u8; 65]) -> Result { + let hash = keccak_256(rlp_encoded); + let mut addr = H160::default(); + let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; + addr.assign_from_slice(&keccak_256(&pk[..])[12..]); + + Ok(addr) + } +} + impl TransactionLegacySigned { + /// Create a signed transaction from an [`TransactionLegacyUnsigned`] and a signature. + pub fn from( + transaction_legacy_unsigned: TransactionLegacyUnsigned, + signature: &[u8; 65], + ) -> TransactionLegacySigned { + let r = U256::from_big_endian(&signature[..32]); + let s = U256::from_big_endian(&signature[32..64]); + let recovery_id = signature[64] as u32; + let v = transaction_legacy_unsigned + .chain_id + .map(|chain_id| chain_id * 2 + 35 + recovery_id) + .unwrap_or_else(|| U256::from(27) + recovery_id); + + TransactionLegacySigned { transaction_legacy_unsigned, r, s, v } + } + + /// Get the raw 65 bytes signature from the signed transaction. + pub fn raw_signature(&self) -> Result<[u8; 65], ()> { + let mut s = [0u8; 65]; + self.r.write_as_big_endian(s[0..32].as_mut()); + self.s.write_as_big_endian(s[32..64].as_mut()); + s[64] = self.extract_recovery_id().ok_or(())?; + Ok(s) + } + /// Get the recovery ID from the signed transaction. /// See https://eips.ethereum.org/EIPS/eip-155 fn extract_recovery_id(&self) -> Option { @@ -33,154 +71,10 @@ impl TransactionLegacySigned { self.v.try_into().ok() } } -} - -impl TransactionUnsigned { - /// Extract the unsigned transaction from a signed transaction. 
- pub fn from_signed(tx: TransactionSigned) -> Self { - match tx { - TransactionSigned::TransactionLegacySigned(signed) => - Self::TransactionLegacyUnsigned(signed.transaction_legacy_unsigned), - TransactionSigned::Transaction4844Signed(signed) => - Self::Transaction4844Unsigned(signed.transaction_4844_unsigned), - TransactionSigned::Transaction1559Signed(signed) => - Self::Transaction1559Unsigned(signed.transaction_1559_unsigned), - TransactionSigned::Transaction2930Signed(signed) => - Self::Transaction2930Unsigned(signed.transaction_2930_unsigned), - } - } - - /// Create a signed transaction from an [`TransactionUnsigned`] and a signature. - pub fn with_signature(self, signature: [u8; 65]) -> TransactionSigned { - let r = U256::from_big_endian(&signature[..32]); - let s = U256::from_big_endian(&signature[32..64]); - let recovery_id = signature[64]; - - match self { - TransactionUnsigned::Transaction2930Unsigned(transaction_2930_unsigned) => - Transaction2930Signed { - transaction_2930_unsigned, - r, - s, - v: None, - y_parity: U256::from(recovery_id), - } - .into(), - TransactionUnsigned::Transaction1559Unsigned(transaction_1559_unsigned) => - Transaction1559Signed { - transaction_1559_unsigned, - r, - s, - v: None, - y_parity: U256::from(recovery_id), - } - .into(), - - TransactionUnsigned::Transaction4844Unsigned(transaction_4844_unsigned) => - Transaction4844Signed { - transaction_4844_unsigned, - r, - s, - y_parity: U256::from(recovery_id), - } - .into(), - - TransactionUnsigned::TransactionLegacyUnsigned(transaction_legacy_unsigned) => { - let v = transaction_legacy_unsigned - .chain_id - .map(|chain_id| { - chain_id - .saturating_mul(U256::from(2)) - .saturating_add(U256::from(35u32 + recovery_id as u32)) - }) - .unwrap_or_else(|| U256::from(27u32 + recovery_id as u32)); - - TransactionLegacySigned { transaction_legacy_unsigned, r, s, v }.into() - }, - } - } -} - -impl TransactionSigned { - /// Get the raw 65 bytes signature from the signed transaction. 
- pub fn raw_signature(&self) -> Result<[u8; 65], ()> { - use TransactionSigned::*; - let (r, s, v) = match self { - TransactionLegacySigned(tx) => (tx.r, tx.s, tx.extract_recovery_id().ok_or(())?), - Transaction4844Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), - Transaction1559Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), - Transaction2930Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), - }; - let mut sig = [0u8; 65]; - r.write_as_big_endian(sig[0..32].as_mut()); - s.write_as_big_endian(sig[32..64].as_mut()); - sig[64] = v; - Ok(sig) - } - /// Recover the Ethereum address, from a signed transaction. + /// Recover the Ethereum address from the signed transaction. pub fn recover_eth_address(&self) -> Result { - use TransactionSigned::*; - - let mut s = rlp::RlpStream::new(); - match self { - TransactionLegacySigned(tx) => { - let tx = &tx.transaction_legacy_unsigned; - s.append(tx); - }, - Transaction4844Signed(tx) => { - let tx = &tx.transaction_4844_unsigned; - s.append(&tx.r#type.value()); - s.append(tx); - }, - Transaction1559Signed(tx) => { - let tx = &tx.transaction_1559_unsigned; - s.append(&tx.r#type.value()); - s.append(tx); - }, - Transaction2930Signed(tx) => { - let tx = &tx.transaction_2930_unsigned; - s.append(&tx.r#type.value()); - s.append(tx); - }, - } - let bytes = s.out().to_vec(); - let signature = self.raw_signature()?; - - let hash = keccak_256(&bytes); - let mut addr = H160::default(); - let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; - addr.assign_from_slice(&keccak_256(&pk[..])[12..]); - Ok(addr) - } -} - -#[test] -fn sign_and_recover_work() { - use crate::evm::TransactionUnsigned; - let txs = [ - // Legacy - "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808026a07b2e762a17a71a46b422e60890a04512cf0d907ccf6b78b5bd6e6977efdc2bf5a01ea673d50bbe7c2236acb498ceb8346a8607c941f0b8cbcde7cf439aa9369f1f", - //// type 1: EIP2930 - 
"01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0c45a61b3d1d00169c649e7326e02857b850efb96e587db4b9aad29afc80d0752a070ae1eb47ab4097dbed2f19172ae286492621b46ac737ee6c32fb18a00c94c9c", - // type 2: EIP1559 - "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a055d72bbc3047d4b9d3e4b8099f187143202407746118204cc2e0cb0c85a68baea04f6ef08a1418c70450f53398d9f0f2d78d9e9d6b8a80cba886b67132c4a744f2", - // type 3: EIP4844 - "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000001a0672b8bac466e2cf1be3148c030988d40d582763ecebbc07700dfc93bb070d8a4a07c635887005b11cb58964c04669ac2857fa633aa66f662685dadfd8bcacb0f21", - ]; - let account = Account::from_secret_key(hex_literal::hex!( - "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" - )); - - for tx in txs { - let raw_tx = hex::decode(tx).unwrap(); - let tx = TransactionSigned::decode(&raw_tx).unwrap(); - - let address = tx.recover_eth_address(); - assert_eq!(address.unwrap(), account.address()); - - let unsigned = TransactionUnsigned::from_signed(tx.clone()); - let signed = account.sign_transaction(unsigned); - assert_eq!(tx, signed); + let rlp_encoded = self.transaction_legacy_unsigned.rlp_bytes(); + TransactionLegacyUnsigned::recover_eth_address(&rlp_encoded, &self.raw_signature()?) 
} } diff --git a/substrate/frame/revive/src/evm/api/type_id.rs b/substrate/frame/revive/src/evm/api/type_id.rs index c6e018a379b3..7434ca6e9b7f 100644 --- a/substrate/frame/revive/src/evm/api/type_id.rs +++ b/substrate/frame/revive/src/evm/api/type_id.rs @@ -17,7 +17,6 @@ //! Ethereum Typed Transaction types use super::Byte; use codec::{Decode, Encode}; -use paste::paste; use rlp::Decodable; use scale_info::TypeInfo; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -30,14 +29,8 @@ macro_rules! transaction_type { #[derive(Clone, Default, Debug, Eq, PartialEq)] pub struct $name; - // upper case const name - paste! { - #[doc = concat!("Transaction value for type identifier: ", $value)] - pub const [<$name:snake:upper>]: u8 = $value; - } - impl $name { - /// Convert to u8 + /// Get the value of the type pub fn value(&self) -> u8 { $value } @@ -114,12 +107,7 @@ macro_rules! transaction_type { }; } -transaction_type!(TypeLegacy, 0); -transaction_type!(TypeEip2930, 1); -transaction_type!(TypeEip1559, 2); -transaction_type!(TypeEip4844, 3); - -#[test] -fn transaction_type() { - assert_eq!(TYPE_EIP2930, 1u8); -} +transaction_type!(Type0, 0); +transaction_type!(Type1, 1); +transaction_type!(Type2, 2); +transaction_type!(Type3, 3); diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 24b75de83569..d4c3440a3ea7 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Runtime types for integrating `pallet-revive` with the EVM. 
use crate::{ - evm::api::{GenericTransaction, TransactionSigned}, + evm::api::{TransactionLegacySigned, TransactionLegacyUnsigned}, AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET, }; use codec::{Decode, Encode}; @@ -92,12 +92,8 @@ impl ExtrinsicLike impl ExtrinsicMetadata for UncheckedExtrinsic { - const VERSIONS: &'static [u8] = generic::UncheckedExtrinsic::< - Address, - CallOf, - Signature, - E::Extension, - >::VERSIONS; + const VERSION: u8 = + generic::UncheckedExtrinsic::, Signature, E::Extension>::VERSION; type TransactionExtensions = E::Extension; } @@ -297,7 +293,7 @@ pub trait EthExtra { CallOf: From>, ::Hash: frame_support::traits::IsType, { - let tx = TransactionSigned::decode(&payload).map_err(|err| { + let tx = rlp::decode::(&payload).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); InvalidTransaction::Call })?; @@ -309,33 +305,32 @@ pub trait EthExtra { let signer = ::AddressMapper::to_fallback_account_id(&signer); - let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } = - GenericTransaction::from_signed(tx, None); + let TransactionLegacyUnsigned { nonce, chain_id, to, value, input, gas, gas_price, .. 
} = + tx.transaction_legacy_unsigned; if chain_id.unwrap_or_default() != ::ChainId::get().into() { log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); return Err(InvalidTransaction::Call); } - let value = crate::Pallet::::convert_evm_to_native(value.unwrap_or_default()) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); - InvalidTransaction::Call - })?; + let value = (value / U256::from(::NativeToEthRatio::get())) + .try_into() + .map_err(|_| InvalidTransaction::Call)?; - let data = input.unwrap_or_default().0; let call = if let Some(dest) = to { crate::Call::call:: { dest, value, gas_limit, storage_deposit_limit, - data, + data: input.0, } } else { - let blob = match polkavm::ProgramBlob::blob_length(&data) { - Some(blob_len) => - blob_len.try_into().ok().and_then(|blob_len| (data.split_at_checked(blob_len))), + let blob = match polkavm::ProgramBlob::blob_length(&input.0) { + Some(blob_len) => blob_len + .try_into() + .ok() + .and_then(|blob_len| (input.0.split_at_checked(blob_len))), _ => None, }; @@ -354,18 +349,17 @@ pub trait EthExtra { } }; - let nonce = nonce.unwrap_or_default().try_into().map_err(|_| InvalidTransaction::Call)?; + let nonce = nonce.try_into().map_err(|_| InvalidTransaction::Call)?; - // Fees calculated with the fixed `GAS_PRICE` - // When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE` + // Fees calculated with the fixed `GAS_PRICE` that should be used to estimate the gas. let eth_fee_no_tip = U256::from(GAS_PRICE) - .saturating_mul(gas.unwrap_or_default()) + .saturating_mul(gas) .try_into() .map_err(|_| InvalidTransaction::Call)?; // Fees with the actual gas_price from the transaction. 
- let eth_fee: BalanceOf = U256::from(gas_price.unwrap_or_default()) - .saturating_mul(gas.unwrap_or_default()) + let eth_fee: BalanceOf = U256::from(gas_price) + .saturating_mul(gas) .try_into() .map_err(|_| InvalidTransaction::Call)?; @@ -382,8 +376,6 @@ pub trait EthExtra { .into(); log::trace!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}"); - // The fees from the Ethereum transaction should be greater or equal to the actual fees paid - // by the account. if eth_fee < actual_fee { log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}"); return Err(InvalidTransaction::Payment.into()) @@ -418,12 +410,48 @@ mod test { }; use frame_support::{error::LookupError, traits::fungible::Mutate}; use pallet_revive_fixtures::compile_module; + use rlp::Encodable; use sp_runtime::{ traits::{Checkable, DispatchTransaction}, MultiAddress, MultiSignature, }; type AccountIdOf = ::AccountId; + /// A simple account that can sign transactions + pub struct Account(subxt_signer::eth::Keypair); + + impl Default for Account { + fn default() -> Self { + Self(subxt_signer::eth::dev::alith()) + } + } + + impl From for Account { + fn from(kp: subxt_signer::eth::Keypair) -> Self { + Self(kp) + } + } + + impl Account { + /// Get the [`AccountId`] of the account. + pub fn account_id(&self) -> AccountIdOf { + let address = self.address(); + ::AddressMapper::to_fallback_account_id(&address) + } + + /// Get the [`H160`] address of the account. + pub fn address(&self) -> H160 { + H160::from_slice(&self.0.account_id().as_ref()) + } + + /// Sign a transaction. 
+ pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned { + let rlp_encoded = tx.rlp_bytes(); + let signature = self.0.sign(&rlp_encoded); + TransactionLegacySigned::from(tx, signature.as_ref()) + } + } + #[derive(Clone, PartialEq, Eq, Debug)] pub struct Extra; type SignedExtra = (frame_system::CheckNonce, ChargeTransactionPayment); @@ -455,265 +483,235 @@ mod test { /// A builder for creating an unchecked extrinsic, and test that the check function works. #[derive(Clone)] struct UncheckedExtrinsicBuilder { - tx: GenericTransaction, + tx: TransactionLegacyUnsigned, gas_limit: Weight, storage_deposit_limit: BalanceOf, - before_validate: Option>, } impl UncheckedExtrinsicBuilder { /// Create a new builder with default values. fn new() -> Self { Self { - tx: GenericTransaction { - from: Some(Account::default().address()), + tx: TransactionLegacyUnsigned { chain_id: Some(::ChainId::get().into()), - gas_price: Some(U256::from(GAS_PRICE)), + gas_price: U256::from(GAS_PRICE), ..Default::default() }, gas_limit: Weight::zero(), storage_deposit_limit: 0, - before_validate: None, } } fn estimate_gas(&mut self) { - let dry_run = - crate::Pallet::::bare_eth_transact(self.tx.clone(), Weight::MAX, |call| { + let dry_run = crate::Pallet::::bare_eth_transact( + Account::default().account_id(), + self.tx.to, + self.tx.value.try_into().unwrap(), + self.tx.input.clone().0, + Weight::MAX, + u64::MAX, + |call| { let call = RuntimeCall::Contracts(call); let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); uxt.encoded_size() as u32 - }); - - match dry_run { - Ok(dry_run) => { - log::debug!(target: LOG_TARGET, "Estimated gas: {:?}", dry_run.eth_gas); - self.tx.gas = Some(dry_run.eth_gas); - }, - Err(err) => { - log::debug!(target: LOG_TARGET, "Failed to estimate gas: {:?}", err); }, - } + crate::DebugInfo::Skip, + crate::CollectEvents::Skip, + ); + self.tx.gas = ((dry_run.fee + GAS_PRICE as u64) / (GAS_PRICE as u64)).into(); } /// 
Create a new builder with a call to the given address. fn call_with(dest: H160) -> Self { let mut builder = Self::new(); builder.tx.to = Some(dest); - ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); + builder.estimate_gas(); builder } /// Create a new builder with an instantiate call. fn instantiate_with(code: Vec, data: Vec) -> Self { let mut builder = Self::new(); - builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect())); - ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); + builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect()); + builder.estimate_gas(); builder } /// Update the transaction with the given function. - fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self { + fn update(mut self, f: impl FnOnce(&mut TransactionLegacyUnsigned) -> ()) -> Self { f(&mut self.tx); self } - /// Set before_validate function. - fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { - self.before_validate = Some(std::sync::Arc::new(f)); - self - } /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { - ExtBuilder::default().build().execute_with(|| { - let UncheckedExtrinsicBuilder { - tx, - gas_limit, - storage_deposit_limit, - before_validate, - } = self.clone(); - - // Fund the account. 
- let account = Account::default(); - let _ = ::Currency::set_balance( - &account.substrate_account(), - 100_000_000_000_000, - ); - - let payload = - account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload(); - let call = RuntimeCall::Contracts(crate::Call::eth_transact { - payload, - gas_limit, - storage_deposit_limit, - }); - - let encoded_len = call.encoded_size(); - let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); - let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; - let (account_id, extra): (AccountId32, SignedExtra) = match result.format { - ExtrinsicFormat::Signed(signer, extra) => (signer, extra), - _ => unreachable!(), - }; + let UncheckedExtrinsicBuilder { tx, gas_limit, storage_deposit_limit } = self.clone(); + + // Fund the account. + let account = Account::default(); + let _ = ::Currency::set_balance( + &account.account_id(), + 100_000_000_000_000, + ); + + let payload = account.sign_transaction(tx).rlp_bytes().to_vec(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { + payload, + gas_limit, + storage_deposit_limit, + }); - before_validate.map(|f| f()); - extra.clone().validate_and_prepare( - RuntimeOrigin::signed(account_id), - &result.function, - &result.function.get_dispatch_info(), - encoded_len, - 0, - )?; - - Ok((result.function, extra)) - }) + let encoded_len = call.encoded_size(); + let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); + let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; + let (account_id, extra): (AccountId32, SignedExtra) = match result.format { + ExtrinsicFormat::Signed(signer, extra) => (signer, extra), + _ => unreachable!(), + }; + + extra.clone().validate_and_prepare( + RuntimeOrigin::signed(account_id), + &result.function, + &result.function.get_dispatch_info(), + encoded_len, + )?; + + Ok((result.function, extra)) } } #[test] fn check_eth_transact_call_works() { - let builder = 
UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); - assert_eq!( - builder.check().unwrap().0, - crate::Call::call:: { - dest: builder.tx.to.unwrap(), - value: builder.tx.value.unwrap_or_default().as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - data: builder.tx.input.unwrap_or_default().0 - } - .into() - ); + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check().unwrap().0, + crate::Call::call:: { + dest: builder.tx.to.unwrap(), + value: builder.tx.value.as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + data: builder.tx.input.0 + } + .into() + ); + }); } #[test] fn check_eth_transact_instantiate_works() { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - assert_eq!( - builder.check().unwrap().0, - crate::Call::instantiate_with_code:: { - value: builder.tx.value.unwrap_or_default().as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - code, - data, - salt: None - } - .into() - ); + ExtBuilder::default().build().execute_with(|| { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + assert_eq!( + builder.check().unwrap().0, + crate::Call::instantiate_with_code:: { + value: builder.tx.value.as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + code, + data, + salt: None + } + .into() + ); + }); } #[test] fn check_eth_transact_nonce_works() { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.nonce = Some(1u32.into())); - - assert_eq!( - builder.check(), - 
Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) - ); - - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).before_validate(|| { - >::inc_account_nonce(Account::default().substrate_account()); - }); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) - ); + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.nonce = 1u32.into()); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + >::inc_account_nonce(Account::default().account_id()); + + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); + }); } #[test] fn check_eth_transact_chain_id_works() { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.chain_id = Some(42.into())); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + }); } #[test] fn check_instantiate_data() { - let code = b"invalid code".to_vec(); - let data = vec![1]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - // Fail because the tx input fail to get the blob length - assert_eq!( - builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); + ExtBuilder::default().build().execute_with(|| { + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = 
UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Bytes(vec![1, 2, 3])).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + }); } #[test] fn check_transaction_fees() { - let scenarios: [(_, Box, _); 5] = [ - ( - "Eth fees too low", - Box::new(|tx| { - tx.gas_price = Some(tx.gas_price.unwrap() / 2); - }), - InvalidTransaction::Payment, - ), - ( - "Gas fees too high", - Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 2); - }), - InvalidTransaction::Call, - ), - ( - "Gas fees too low", - Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 2); - }), - InvalidTransaction::Call, - ), - ( - "Diff > 10%", - Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 111 / 100); - }), - InvalidTransaction::Call, - ), - ( - "Diff < 10%", - Box::new(|tx| { - tx.gas_price = Some(tx.gas_price.unwrap() * 2); - tx.gas = Some(tx.gas.unwrap() * 89 / 100); - }), - InvalidTransaction::Call, - ), - ]; - - for (msg, update_tx, err) in scenarios { - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); - - assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); - } + ExtBuilder::default().build().execute_with(|| { + let scenarios: [(_, Box, _); 5] = [ + ("Eth fees too low", Box::new(|tx| tx.gas_price /= 2), InvalidTransaction::Payment), + ("Gas fees too high", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), + ("Gas fees too low", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), + ( + "Diff > 10%", + Box::new(|tx| tx.gas = tx.gas * 111 / 100), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price *= 2; + tx.gas = tx.gas * 89 / 100 + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + 
assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } + }); } #[test] fn check_transaction_tip() { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) - .update(|tx| { - tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); - log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); - }); - - let tx = &builder.tx; - let expected_tip = - tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); - let (_, extra) = builder.check().unwrap(); - assert_eq!(U256::from(extra.1.tip()), expected_tip); + ExtBuilder::default().build().execute_with(|| { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| tx.gas_price = tx.gas_price * 103 / 100); + + let tx = &builder.tx; + let expected_tip = tx.gas_price * tx.gas - U256::from(GAS_PRICE) * tx.gas; + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); + }); } } diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index a6a259149768..4f90b41b0de5 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -210,13 +210,7 @@ pub trait Ext: sealing::Sealed { /// Execute code in the current frame. /// /// Returns the code size of the called contract. - fn delegate_call( - &mut self, - gas_limit: Weight, - deposit_limit: U256, - address: H160, - input_data: Vec, - ) -> Result<(), ExecError>; + fn delegate_call(&mut self, code: H256, input_data: Vec) -> Result<(), ExecError>; /// Instantiate a contract from the given code. /// @@ -298,7 +292,7 @@ pub trait Ext: sealing::Sealed { fn code_hash(&self, address: &H160) -> H256; /// Returns the code size of the contract at the given `address` or zero. 
- fn code_size(&self, address: &H160) -> u64; + fn code_size(&self, address: &H160) -> U256; /// Returns the code hash of the contract being executed. fn own_code_hash(&mut self) -> &H256; @@ -562,9 +556,6 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBuffer>, /// Transient storage used to store data, which is kept for the duration of a transaction. transient_storage: TransientStorage, - /// Whether or not actual transfer of funds should be performed. - /// This is set to `true` exclusively when we simulate a call through eth_transact. - skip_transfer: bool, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -578,8 +569,8 @@ struct Frame { account_id: T::AccountId, /// The cached in-storage data of the contract. contract_info: CachedContract, - /// The EVM balance transferred by the caller as part of the call. - value_transferred: U256, + /// The amount of balance transferred by the caller as part of the call. + value_transferred: BalanceOf, /// Determines whether this is a call or instantiate frame. entry_point: ExportedFunction, /// The gas meter capped to the supplied gas limit. @@ -590,30 +581,18 @@ struct Frame { allows_reentry: bool, /// If `true` subsequent calls cannot modify storage. read_only: bool, - /// The delegate call info of the currently executing frame which was spawned by - /// `delegate_call`. - delegate: Option>, + /// The caller of the currently executing frame which was spawned by `delegate_call`. + delegate_caller: Option>, /// The output of the last executed call frame. last_frame_output: ExecReturnValue, } -/// This structure is used to represent the arguments in a delegate call frame in order to -/// distinguish who delegated the call and where it was delegated to. -struct DelegateInfo { - /// The caller of the contract. - pub caller: Origin, - /// The address of the contract the call was delegated to. 
- pub callee: H160, -} - /// Used in a delegate call frame arguments in order to override the executable and caller. struct DelegatedCall { /// The executable which is run instead of the contracts own `executable`. executable: E, /// The caller of the contract. caller: Origin, - /// The address of the contract the call was delegated to. - callee: H160, } /// Parameter passed in when creating a new `Frame`. @@ -778,9 +757,8 @@ where dest: H160, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: U256, + value: BalanceOf, input_data: Vec, - skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> ExecResult { let dest = T::AddressMapper::to_account_id(&dest); @@ -790,7 +768,6 @@ where gas_meter, storage_meter, value, - skip_transfer, debug_message, )? { stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) @@ -814,10 +791,9 @@ where executable: E, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: U256, + value: BalanceOf, input_data: Vec, salt: Option<&[u8; 32]>, - skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( @@ -831,7 +807,6 @@ where gas_meter, storage_meter, value, - skip_transfer, debug_message, )? .expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE); @@ -859,8 +834,7 @@ where origin, gas_meter, storage_meter, - value.into(), - false, + value, debug_message, ) .unwrap() @@ -869,15 +843,14 @@ where /// Create a new call stack. /// - /// Returns `None` when calling a non existent contract. This is not an error case + /// Returns `None` when calling a non existant contract. This is not an error case /// since this will result in a value transfer. 
fn new( args: FrameArgs, origin: Origin, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: U256, - skip_transfer: bool, + value: BalanceOf, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { origin.ensure_mapped()?; @@ -905,7 +878,6 @@ where frames: Default::default(), debug_message, transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES), - skip_transfer, _phantom: Default::default(), }; @@ -918,7 +890,7 @@ where /// not initialized, yet. fn new_frame( frame_args: FrameArgs, - value_transferred: U256, + value_transferred: BalanceOf, gas_meter: &mut GasMeter, gas_limit: Weight, storage_meter: &mut storage::meter::GenericMeter, @@ -926,7 +898,8 @@ where read_only: bool, origin_is_caller: bool, ) -> Result, E)>, ExecError> { - let (account_id, contract_info, executable, delegate, entry_point) = match frame_args { + let (account_id, contract_info, executable, delegate_caller, entry_point) = match frame_args + { FrameArgs::Call { dest, cached_info, delegated_call } => { let contract = if let Some(contract) = cached_info { contract @@ -941,8 +914,8 @@ where }; let (executable, delegate_caller) = - if let Some(DelegatedCall { executable, caller, callee }) = delegated_call { - (executable, Some(DelegateInfo { caller, callee })) + if let Some(DelegatedCall { executable, caller }) = delegated_call { + (executable, Some(caller)) } else { (E::from_storage(contract.code_hash, gas_meter)?, None) }; @@ -958,8 +931,8 @@ where use sp_runtime::Saturating; address::create1( &deployer, - // the Nonce from the origin has been incremented pre-dispatch, so we - // need to subtract 1 to get the nonce at the time of the call. + // the Nonce from the origin has been incremented pre-dispatch, so we need + // to subtract 1 to get the nonce at the time of the call. 
if origin_is_caller { account_nonce.saturating_sub(1u32.into()).saturated_into() } else { @@ -983,7 +956,7 @@ where }; let frame = Frame { - delegate, + delegate_caller, value_transferred, contract_info: CachedContract::Cached(contract_info), account_id, @@ -1002,7 +975,7 @@ where fn push_frame( &mut self, frame_args: FrameArgs, - value_transferred: U256, + value_transferred: BalanceOf, gas_limit: Weight, deposit_limit: BalanceOf, read_only: bool, @@ -1052,7 +1025,7 @@ where let frame = self.top_frame(); let entry_point = frame.entry_point; let delegated_code_hash = - if frame.delegate.is_some() { Some(*executable.code_hash()) } else { None }; + if frame.delegate_caller.is_some() { Some(*executable.code_hash()) } else { None }; // The output of the caller frame will be replaced by the output of this run. // It is also not accessible from nested frames. @@ -1068,7 +1041,7 @@ where self.transient_storage.start_transaction(); - let do_transaction = || -> ExecResult { + let do_transaction = || { let caller = self.caller(); let frame = top_frame_mut!(self); @@ -1083,7 +1056,6 @@ where &frame.account_id, frame.contract_info.get(&frame.account_id), executable.code_info(), - self.skip_transfer, )?; // Needs to be incremented before calling into the code so that it is visible // in case of recursion. 
@@ -1107,8 +1079,11 @@ where let call_span = T::Debug::new_call_span(&contract_address, entry_point, &input_data); let output = T::Debug::intercept_call(&contract_address, entry_point, &input_data) - .unwrap_or_else(|| executable.execute(self, entry_point, input_data)) - .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; + .unwrap_or_else(|| { + executable + .execute(self, entry_point, input_data) + .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee }) + })?; call_span.after_call(&output); @@ -1128,16 +1103,7 @@ where frame.nested_storage.enforce_limit(contract)?; } - let frame = self.top_frame_mut(); - - // If a special limit was set for the sub-call, we enforce it here. - // The sub-call will be rolled back in case the limit is exhausted. - let contract = frame.contract_info.as_contract(); - frame - .nested_storage - .enforce_subcall_limit(contract) - .map_err(|e| ExecError { error: e, origin: ErrorOrigin::Callee })?; - + let frame = self.top_frame(); let account_id = T::AddressMapper::to_address(&frame.account_id); match (entry_point, delegated_code_hash) { (ExportedFunction::Constructor, _) => { @@ -1146,7 +1112,15 @@ where return Err(Error::::TerminatedInConstructor.into()); } + // If a special limit was set for the sub-call, we enforce it here. + // This is needed because contract constructor might write to storage. + // The sub-call will be rolled back in case the limit is exhausted. + let frame = self.top_frame_mut(); + let contract = frame.contract_info.as_contract(); + frame.nested_storage.enforce_subcall_limit(contract)?; + let caller = T::AddressMapper::to_address(self.caller().account_id()?); + // Deposit an instantiation event. Contracts::::deposit_event(Event::Instantiated { deployer: caller, @@ -1160,6 +1134,12 @@ where }); }, (ExportedFunction::Call, None) => { + // If a special limit was set for the sub-call, we enforce it here. + // The sub-call will be rolled back in case the limit is exhausted. 
+ let frame = self.top_frame_mut(); + let contract = frame.contract_info.as_contract(); + frame.nested_storage.enforce_subcall_limit(contract)?; + let caller = self.caller(); Contracts::::deposit_event(Event::Called { caller: caller.clone(), @@ -1302,9 +1282,8 @@ where origin: &Origin, from: &T::AccountId, to: &T::AccountId, - value: U256, + value: BalanceOf, ) -> ExecResult { - let value = crate::Pallet::::convert_evm_to_native(value)?; if value.is_zero() { return Ok(Default::default()); } @@ -1332,7 +1311,7 @@ where origin: &Origin, from: &Origin, to: &T::AccountId, - value: U256, + value: BalanceOf, ) -> ExecResult { // If the from address is root there is no account to transfer from, and therefore we can't // take any `value` other than 0. @@ -1379,11 +1358,7 @@ where /// Returns the *free* balance of the supplied AccountId. fn account_balance(&self, who: &T::AccountId) -> U256 { - crate::Pallet::::convert_native_to_evm(T::Currency::reducible_balance( - who, - Preservation::Preserve, - Fortitude::Polite, - )) + T::Currency::reducible_balance(who, Preservation::Preserve, Fortitude::Polite).into() } /// Certain APIs, e.g. `{set,get}_immutable_data` behave differently depending @@ -1488,20 +1463,11 @@ where result } - fn delegate_call( - &mut self, - gas_limit: Weight, - deposit_limit: U256, - address: H160, - input_data: Vec, - ) -> Result<(), ExecError> { + fn delegate_call(&mut self, code_hash: H256, input_data: Vec) -> Result<(), ExecError> { // We reset the return data now, so it is cleared out even if no new frame was executed. // This is for example the case for unknown code hashes or creating the frame fails. 
*self.last_frame_output_mut() = Default::default(); - let code_hash = ContractInfoOf::::get(&address) - .ok_or(Error::::CodeNotFound) - .map(|c| c.code_hash)?; let executable = E::from_storage(code_hash, self.gas_meter_mut())?; let top_frame = self.top_frame_mut(); let contract_info = top_frame.contract_info().clone(); @@ -1511,15 +1477,11 @@ where FrameArgs::Call { dest: account_id, cached_info: Some(contract_info), - delegated_call: Some(DelegatedCall { - executable, - caller: self.caller().clone(), - callee: address, - }), + delegated_call: Some(DelegatedCall { executable, caller: self.caller().clone() }), }, value, - gas_limit, - deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, + Weight::zero(), + BalanceOf::::zero(), self.is_read_only(), )?; self.run(executable.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE), input_data) @@ -1634,7 +1596,7 @@ where } fn caller(&self) -> Origin { - if let Some(DelegateInfo { caller, .. }) = &self.top_frame().delegate { + if let Some(caller) = &self.top_frame().delegate_caller { caller.clone() } else { self.frames() @@ -1663,7 +1625,7 @@ where }) } - fn code_size(&self, address: &H160) -> u64 { + fn code_size(&self, address: &H160) -> U256 { >::get(&address) .and_then(|contract| CodeInfoOf::::get(contract.code_hash)) .map(|info| info.code_len()) @@ -1688,13 +1650,7 @@ where return Err(Error::::InvalidImmutableAccess.into()); } - // Immutable is read from contract code being executed - let address = self - .top_frame() - .delegate - .as_ref() - .map(|d| d.callee) - .unwrap_or(T::AddressMapper::to_address(self.account_id())); + let address = T::AddressMapper::to_address(self.account_id()); Ok(>::get(address).ok_or_else(|| Error::::InvalidImmutableAccess)?) 
} @@ -1956,7 +1912,7 @@ mod tests { AddressMapper, Error, }; use assert_matches::assert_matches; - use frame_support::{assert_err, assert_noop, assert_ok, parameter_types}; + use frame_support::{assert_err, assert_ok, parameter_types}; use frame_system::{AccountInfo, EventRecord, Phase}; use pallet_revive_uapi::ReturnFlags; use pretty_assertions::assert_eq; @@ -2110,9 +2066,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - value.into(), + value, vec![], - false, None, ), Ok(_) @@ -2131,7 +2086,7 @@ mod tests { set_balance(&BOB, 0); let origin = Origin::from_account_id(ALICE); - MockStack::transfer(&origin, &ALICE, &BOB, 55u64.into()).unwrap(); + MockStack::transfer(&origin, &ALICE, &BOB, 55).unwrap(); let min_balance = ::Currency::minimum_balance(); assert_eq!(get_balance(&ALICE), 45 - min_balance); @@ -2152,12 +2107,7 @@ mod tests { set_balance(&ALICE, ed * 2); set_balance(&BOB, ed + value); - assert_ok!(MockStack::transfer( - &Origin::from_account_id(ALICE), - &BOB, - &CHARLIE, - value.into() - )); + assert_ok!(MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &CHARLIE, value)); assert_eq!(get_balance(&ALICE), ed); assert_eq!(get_balance(&BOB), ed); assert_eq!(get_balance(&CHARLIE), ed + value); @@ -2166,7 +2116,7 @@ mod tests { set_balance(&ALICE, ed); set_balance(&BOB, ed + value); assert_err!( - MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value.into()), + MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value), >::TransferFailed ); @@ -2174,7 +2124,7 @@ mod tests { set_balance(&ALICE, ed * 2); set_balance(&BOB, value); assert_err!( - MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &EVE, value.into()), + MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &EVE, value), >::TransferFailed ); // The ED transfer would work. 
But it should only be executed with the actual transfer @@ -2203,9 +2153,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value.into(), + value, vec![], - false, None, ) .unwrap(); @@ -2226,87 +2175,33 @@ mod tests { let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), U256::from(value)); - let _ = - ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?; + let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, delegate_ch); - place_contract(&CHARLIE, success_ch); set_balance(&ALICE, 100); let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); - assert_ok!(MockStack::run_call( + let _ = MockStack::run_call( origin, BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value.into(), + value, vec![], - false, None, - )); + ) + .unwrap(); assert_eq!(get_balance(&ALICE), 100 - value); assert_eq!(get_balance(&BOB_FALLBACK), balance + value); }); } - #[test] - fn delegate_call_missing_contract() { - let missing_ch = MockLoader::insert(Call, move |_ctx, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - }); - - let delegate_ch = MockLoader::insert(Call, move |ctx, _| { - let _ = - ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?; - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - }); - - ExtBuilder::default().build().execute_with(|| { - place_contract(&BOB, delegate_ch); - set_balance(&ALICE, 100); - - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); - - // contract code missing - assert_noop!( - MockStack::run_call( - origin.clone(), - BOB_ADDR, - &mut 
GasMeter::::new(GAS_LIMIT), - &mut storage_meter, - U256::zero(), - vec![], - false, - None, - ), - ExecError { - error: Error::::CodeNotFound.into(), - origin: ErrorOrigin::Callee, - } - ); - - // add missing contract code - place_contract(&CHARLIE, missing_ch); - assert_ok!(MockStack::run_call( - origin, - BOB_ADDR, - &mut GasMeter::::new(GAS_LIMIT), - &mut storage_meter, - U256::zero(), - vec![], - false, - None, - )); - }); - } - #[test] fn changes_are_reverted_on_failing_call() { // This test verifies that changes are reverted on a call which fails (or equally, returns @@ -2328,9 +2223,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 55u64.into(), + 55, vec![], - false, None, ) .unwrap(); @@ -2352,7 +2246,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { set_balance(&from, 0); - let result = MockStack::transfer(&origin, &from, &dest, 100u64.into()); + let result = MockStack::transfer(&origin, &from, &dest, 100); assert_eq!(result, Err(Error::::TransferFailed.into())); assert_eq!(get_balance(&from), 0); @@ -2378,9 +2272,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); @@ -2408,9 +2301,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); @@ -2438,9 +2330,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![1, 2, 3, 4], - false, None, ); assert_matches!(result, Ok(_)); @@ -2474,10 +2365,9 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), + min_balance, vec![1, 2, 3, 4], Some(&[0; 32]), - false, None, ); assert_matches!(result, Ok(_)); @@ -2530,9 +2420,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value.into(), + value, vec![], - false, None, ); @@ -2595,9 +2484,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 
U256::zero(), + 0, vec![], - false, None, ); @@ -2661,9 +2549,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); @@ -2694,9 +2581,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); assert_matches!(result, Ok(_)); @@ -2732,9 +2618,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -2759,9 +2644,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -2804,9 +2688,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -2831,9 +2714,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -2858,9 +2740,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 1u64.into(), + 1, vec![0], - false, None, ); assert_matches!(result, Err(_)); @@ -2903,9 +2784,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -2949,9 +2829,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); @@ -2975,10 +2854,9 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - U256::zero(), // <- zero value + 0, // <- zero value vec![], Some(&[0; 32]), - false, None, ), Err(_) @@ -3011,10 +2889,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), + + min_balance, vec![], Some(&[0 ;32]), - false, None, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address 
@@ -3066,10 +2944,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), + + min_balance, vec![], Some(&[0; 32]), - false, None, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address @@ -3132,9 +3010,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - (min_balance * 10).into(), + min_balance * 10, vec![], - false, None, ), Ok(_) @@ -3213,9 +3090,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ), Ok(_) @@ -3256,10 +3132,9 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - 100u64.into(), + 100, vec![], Some(&[0; 32]), - false, None, ), Err(Error::::TerminatedInConstructor.into()) @@ -3322,9 +3197,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -3384,10 +3258,9 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - 10u64.into(), + 10, vec![], Some(&[0; 32]), - false, None, ); assert_matches!(result, Ok(_)); @@ -3412,7 +3285,7 @@ mod tests { true, false ), - >::TransferFailed, + >::TransferFailed ); exec_success() }); @@ -3432,9 +3305,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap(); @@ -3464,9 +3336,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, Some(&mut debug_buffer), ) .unwrap(); @@ -3498,9 +3369,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, Some(&mut debug_buffer), ); assert!(result.is_err()); @@ -3532,9 +3402,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, Some(&mut debug_buf_after), ) .unwrap(); @@ -3566,9 +3435,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, 
CHARLIE_ADDR.as_bytes().to_vec(), - false, None, )); @@ -3579,9 +3447,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, BOB_ADDR.as_bytes().to_vec(), - false, None, ) .map_err(|e| e.error), @@ -3630,9 +3497,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ) .map_err(|e| e.error), @@ -3665,9 +3531,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap(); @@ -3750,9 +3615,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap(); @@ -3876,10 +3740,9 @@ mod tests { fail_executable, &mut gas_meter, &mut storage_meter, - (min_balance * 100).into(), + min_balance * 100, vec![], Some(&[0; 32]), - false, None, ) .ok(); @@ -3890,10 +3753,9 @@ mod tests { success_executable, &mut gas_meter, &mut storage_meter, - (min_balance * 100).into(), + min_balance * 100, vec![], Some(&[0; 32]), - false, None, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -3903,10 +3765,9 @@ mod tests { succ_fail_executable, &mut gas_meter, &mut storage_meter, - (min_balance * 200).into(), + min_balance * 200, vec![], Some(&[0; 32]), - false, None, )); assert_eq!(System::account_nonce(&ALICE), 2); @@ -3916,10 +3777,9 @@ mod tests { succ_succ_executable, &mut gas_meter, &mut storage_meter, - (min_balance * 200).into(), + min_balance * 200, vec![], Some(&[0; 32]), - false, None, )); assert_eq!(System::account_nonce(&ALICE), 3); @@ -3986,9 +3846,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4098,9 +3957,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4138,9 +3996,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4178,9 +4035,8 @@ mod tests { 
BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4232,9 +4088,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4289,9 +4144,8 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4365,9 +4219,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4436,9 +4289,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -4475,9 +4327,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, )); }); @@ -4538,9 +4389,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -4572,9 +4422,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); assert_matches!(result, Ok(_)); @@ -4656,9 +4505,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap() @@ -4725,9 +4573,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ); assert_matches!(result, Ok(_)); @@ -4759,12 +4606,7 @@ mod tests { // An unknown code hash to fail the delegate_call on the first condition. 
*ctx.ext.last_frame_output_mut() = output_revert(); assert_eq!( - ctx.ext.delegate_call( - Weight::zero(), - U256::zero(), - H160([0xff; 20]), - Default::default() - ), + ctx.ext.delegate_call(invalid_code_hash, Default::default()), Err(Error::::CodeNotFound.into()) ); assert_eq!(ctx.ext.last_frame_output(), &Default::default()); @@ -4797,9 +4639,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ); assert_matches!(result, Ok(_)); @@ -4849,9 +4690,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap() @@ -4883,12 +4723,14 @@ mod tests { Ok(vec![2]), ); - // Also in a delegate call, we should witness the callee immutable data + // In a delegate call, we should witness the caller immutable data assert_eq!( - ctx.ext - .delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new()) - .map(|_| ctx.ext.last_frame_output().data.clone()), - Ok(vec![2]) + ctx.ext.delegate_call(charlie_ch, Vec::new()).map(|_| ctx + .ext + .last_frame_output() + .data + .clone()), + Ok(vec![1]) ); exec_success() @@ -4919,9 +4761,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap() @@ -4966,9 +4807,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap() @@ -5011,9 +4851,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![], - false, None, ) .unwrap() @@ -5067,9 +4906,8 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - U256::zero(), + 0, vec![0], - false, None, ), Ok(_) diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index b9a39e7ce4d3..5038ae44afad 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,13 +41,13 @@ pub 
mod test_utils; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, GenericTransaction}, + evm::{runtime::GAS_PRICE, TransactionLegacyUnsigned}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; -use alloc::{boxed::Box, format, vec}; +use alloc::boxed::Box; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -59,7 +59,6 @@ use frame_support::{ pallet_prelude::DispatchClass, traits::{ fungible::{Inspect, Mutate, MutateHold}, - tokens::{Fortitude::Polite, Preservation::Preserve}, ConstU32, ConstU64, Contains, EnsureOrigin, Get, IsType, OriginTrait, Time, }, weights::{Weight, WeightMeter}, @@ -74,7 +73,7 @@ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_core::{H160, H256, U256}; use sp_runtime::{ - traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, + traits::{BadOrigin, Convert, Dispatchable, Saturating}, DispatchError, }; @@ -115,6 +114,19 @@ const SENTINEL: u32 = u32::MAX; /// Example: `RUST_LOG=runtime::revive=debug my_code --dev` const LOG_TARGET: &str = "runtime::revive"; +/// This version determines which syscalls are available to contracts. +/// +/// Needs to be bumped every time a versioned syscall is added. +const API_VERSION: u16 = 0; + +#[test] +fn api_version_up_to_date() { + assert!( + API_VERSION == crate::wasm::HIGHEST_API_VERSION, + "A new versioned API has been added. The `API_VERSION` needs to be bumped." 
+ ); +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -367,7 +379,7 @@ pub mod pallet { type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; type ChainId = ConstU64<0>; - type NativeToEthRatio = ConstU32<1>; + type NativeToEthRatio = ConstU32<1_000_000>; } } @@ -549,8 +561,6 @@ pub mod pallet { ExecutionFailed, /// Failed to convert a U256 to a Balance. BalanceConversionFailed, - /// Failed to convert an EVM balance to a native balance. - DecimalPrecisionLoss, /// Immutable data can only be set during deploys and only be read during calls. /// Additionally, it is only valid to set the data once and it must not be empty. InvalidImmutableAccess, @@ -610,6 +620,14 @@ pub mod pallet { #[pallet::storage] pub(crate) type AddressSuffix = StorageMap<_, Identity, H160, [u8; 12]>; + #[pallet::extra_constants] + impl Pallet { + #[pallet::constant_name(ApiVersion)] + fn api_version() -> u16 { + API_VERSION + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { @@ -747,7 +765,7 @@ pub mod pallet { /// /// # Parameters /// - /// * `payload`: The encoded [`crate::evm::TransactionSigned`]. + /// * `payload`: The RLP-encoded [`crate::evm::TransactionLegacySigned`]. /// * `gas_limit`: The gas limit enforced during contract execution. /// * `storage_deposit_limit`: The maximum balance that can be charged to the caller for /// storage usage. 
@@ -802,7 +820,7 @@ pub mod pallet { dest, value, gas_limit, - DepositLimit::Balance(storage_deposit_limit), + storage_deposit_limit, data, DebugInfo::Skip, CollectEvents::Skip, @@ -838,7 +856,7 @@ pub mod pallet { origin, value, gas_limit, - DepositLimit::Balance(storage_deposit_limit), + storage_deposit_limit, Code::Existing(code_hash), data, salt, @@ -904,7 +922,7 @@ pub mod pallet { origin, value, gas_limit, - DepositLimit::Balance(storage_deposit_limit), + storage_deposit_limit, Code::Upload(code), data, salt, @@ -1062,7 +1080,7 @@ fn dispatch_result( impl Pallet where - BalanceOf: Into + TryFrom + Bounded, + BalanceOf: Into + TryFrom, MomentOf: Into, T::Hash: frame_support::traits::IsType, { @@ -1077,7 +1095,7 @@ where dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: DepositLimit>, + storage_deposit_limit: BalanceOf, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -1091,25 +1109,17 @@ where }; let try_call = || { let origin = Origin::from_runtime_origin(origin)?; - let mut storage_meter = match storage_deposit_limit { - DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, - DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), - }; + let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; let result = ExecStack::>::run_call( origin.clone(), dest, &mut gas_meter, &mut storage_meter, - Self::convert_native_to_evm(value), + value, data, - storage_deposit_limit.is_unchecked(), debug_message.as_mut(), )?; - storage_deposit = storage_meter - .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) - .inspect_err(|err| { - log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); - })?; + storage_deposit = storage_meter.try_into_deposit(&origin)?; Ok(result) }; let result = Self::run_guarded(try_call); @@ -1138,7 +1148,7 @@ where origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: DepositLimit>, + mut 
storage_deposit_limit: BalanceOf, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -1149,24 +1159,13 @@ where let mut storage_deposit = Default::default(); let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; - - let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); - let mut storage_deposit_limit = match storage_deposit_limit { - DepositLimit::Balance(limit) => limit, - DepositLimit::Unchecked => BalanceOf::::max_value(), - }; - let try_instantiate = || { let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { let upload_account = T::UploadOrigin::ensure_origin(origin)?; - let (executable, upload_deposit) = Self::try_upload_code( - upload_account, - code, - storage_deposit_limit, - unchecked_deposit_limit, - )?; + let (executable, upload_deposit) = + Self::try_upload_code(upload_account, code, storage_deposit_limit)?; storage_deposit_limit.saturating_reduce(upload_deposit); (executable, upload_deposit) }, @@ -1174,25 +1173,20 @@ where (WasmBlob::from_storage(code_hash, &mut gas_meter)?, Default::default()), }; let instantiate_origin = Origin::from_account_id(instantiate_account.clone()); - let mut storage_meter = if unchecked_deposit_limit { - StorageMeter::new_unchecked(storage_deposit_limit) - } else { - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? - }; - + let mut storage_meter = + StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)?; let result = ExecStack::>::run_instantiate( instantiate_account, executable, &mut gas_meter, &mut storage_meter, - Self::convert_native_to_evm(value), + value, data, salt.as_ref(), - unchecked_deposit_limit, debug_message.as_mut(), ); storage_deposit = storage_meter - .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? + .try_into_deposit(&instantiate_origin)? 
.saturating_add(&StorageDeposit::Charge(upload_deposit)); result }; @@ -1218,15 +1212,28 @@ where /// /// # Parameters /// - /// - `tx`: The Ethereum transaction to simulate. + /// - `origin`: The origin of the call. + /// - `dest`: The destination address of the call. + /// - `value`: The value to transfer. + /// - `input`: The input data. /// - `gas_limit`: The gas limit enforced during contract execution. + /// - `storage_deposit_limit`: The maximum balance that can be charged to the caller for storage + /// usage. /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the /// unchecked extrinsic. + /// - `debug`: Debugging configuration. + /// - `collect_events`: Event collection configuration. pub fn bare_eth_transact( - mut tx: GenericTransaction, + origin: T::AccountId, + dest: Option, + value: BalanceOf, + input: Vec, gas_limit: Weight, + storage_deposit_limit: BalanceOf, utx_encoded_size: impl Fn(Call) -> u32, - ) -> Result>, EthTransactError> + debug: DebugInfo, + collect_events: CollectEvents, + ) -> EthContractResult> where T: pallet_transaction_payment::Config, ::RuntimeCall: @@ -1237,205 +1244,149 @@ where T::Nonce: Into, T::Hash: frame_support::traits::IsType, { - log::debug!(target: LOG_TARGET, "bare_eth_transact: tx: {tx:?} gas_limit: {gas_limit:?}"); - - let from = tx.from.unwrap_or_default(); - let origin = T::AddressMapper::to_account_id(&from); + log::debug!(target: LOG_TARGET, "bare_eth_transact: dest: {dest:?} value: {value:?} gas_limit: {gas_limit:?} storage_deposit_limit: {storage_deposit_limit:?}"); + // Get the nonce to encode in the tx. + let nonce: T::Nonce = >::account_nonce(&origin); + + // Use a big enough gas price to ensure that the encoded size is large enough. 
+ let max_gas_fee: BalanceOf = + (pallet_transaction_payment::Pallet::::weight_to_fee(Weight::MAX) / + GAS_PRICE.into()) + .into(); - let storage_deposit_limit = if tx.gas.is_some() { - DepositLimit::Balance(BalanceOf::::max_value()) - } else { - DepositLimit::Unchecked - }; + // A contract call. + if let Some(dest) = dest { + // Dry run the call. + let result = crate::Pallet::::bare_call( + T::RuntimeOrigin::signed(origin), + dest, + value, + gas_limit, + storage_deposit_limit, + input.clone(), + debug, + collect_events, + ); - // TODO remove once we have revisited how we encode the gas limit. - if tx.nonce.is_none() { - tx.nonce = Some(>::account_nonce(&origin).into()); - } - if tx.gas_price.is_none() { - tx.gas_price = Some(GAS_PRICE.into()); - } - if tx.chain_id.is_none() { - tx.chain_id = Some(T::ChainId::get().into()); - } + // Get the encoded size of the transaction. + let tx = TransactionLegacyUnsigned { + value: value.into().saturating_mul(T::NativeToEthRatio::get().into()), + input: input.into(), + nonce: nonce.into(), + chain_id: Some(T::ChainId::get().into()), + gas_price: GAS_PRICE.into(), + gas: max_gas_fee.into(), + to: Some(dest), + ..Default::default() + }; - // Convert the value to the native balance type. 
- let evm_value = tx.value.unwrap_or_default(); - let native_value = match Self::convert_evm_to_native(evm_value) { - Ok(v) => v, - Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())), - }; + let eth_dispatch_call = crate::Call::::eth_transact { + payload: tx.dummy_signed_payload(), + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + }; + let encoded_len = utx_encoded_size(eth_dispatch_call); - let input = tx.input.clone().unwrap_or_default().0; - let debug = DebugInfo::Skip; - let collect_events = CollectEvents::Skip; - - let extract_error = |err| { - if err == Error::::TransferFailed.into() || - err == Error::::StorageDepositNotEnoughFunds.into() || - err == Error::::StorageDepositLimitExhausted.into() - { - let balance = Self::evm_balance(&from); - return Err(EthTransactError::Message( - format!("insufficient funds for gas * price + value: address {from:?} have {balance} (supplied gas {})", - tx.gas.unwrap_or_default())) - ); + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = crate::Call::::call { + dest, + value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + data: tx.input.0, } + .into(); + let dispatch_info = dispatch_call.get_dispatch_info(); - return Err(EthTransactError::Message(format!( - "Failed to instantiate contract: {err:?}" - ))); - }; - - // Dry run the call - let (mut result, dispatch_info) = match tx.to { - // A contract call. - Some(dest) => { - // Dry run the call. 
- let result = crate::Pallet::::bare_call( - T::RuntimeOrigin::signed(origin), - dest, - native_value, - gas_limit, - storage_deposit_limit, - input.clone(), - debug, - collect_events, - ); - - let data = match result.result { - Ok(return_value) => { - if return_value.did_revert() { - return Err(EthTransactError::Data(return_value.data)); - } - return_value.data - }, - Err(err) => { - log::debug!(target: LOG_TARGET, "Failed to execute call: {err:?}"); - return extract_error(err) - }, - }; + // Compute the fee. + let fee = pallet_transaction_payment::Pallet::::compute_fee( + encoded_len, + &dispatch_info, + 0u32.into(), + ) + .into(); - let result = EthTransactInfo { - gas_required: result.gas_required, - storage_deposit: result.storage_deposit.charge_or_zero(), - data, - eth_gas: Default::default(), - }; - // Get the dispatch info of the call. - let dispatch_call: ::RuntimeCall = crate::Call::::call { - dest, - value: native_value, - gas_limit: result.gas_required, - storage_deposit_limit: result.storage_deposit, - data: input.clone(), - } - .into(); - (result, dispatch_call.get_dispatch_info()) - }, + log::trace!(target: LOG_TARGET, "bare_eth_call: len: {encoded_len:?} fee: {fee:?}"); + EthContractResult { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + result: result.result.map(|v| v.data), + fee, + } // A contract deployment - None => { - // Extract code and data from the input. - let (code, data) = match polkavm::ProgramBlob::blob_length(&input) { - Some(blob_len) => blob_len - .try_into() - .ok() - .and_then(|blob_len| (input.split_at_checked(blob_len))) - .unwrap_or_else(|| (&input[..], &[][..])), - _ => { - log::debug!(target: LOG_TARGET, "Failed to extract polkavm blob length"); - (&input[..], &[][..]) - }, - }; - - // Dry run the call. 
- let result = crate::Pallet::::bare_instantiate( - T::RuntimeOrigin::signed(origin), - native_value, - gas_limit, - storage_deposit_limit, - Code::Upload(code.to_vec()), - data.to_vec(), - None, - debug, - collect_events, - ); - - let returned_data = match result.result { - Ok(return_value) => { - if return_value.result.did_revert() { - return Err(EthTransactError::Data(return_value.result.data)); - } - return_value.result.data - }, - Err(err) => { - log::debug!(target: LOG_TARGET, "Failed to instantiate: {err:?}"); - return extract_error(err) - }, - }; - - let result = EthTransactInfo { - gas_required: result.gas_required, - storage_deposit: result.storage_deposit.charge_or_zero(), - data: returned_data, - eth_gas: Default::default(), - }; + } else { + // Extract code and data from the input. + let (code, data) = match polkavm::ProgramBlob::blob_length(&input) { + Some(blob_len) => blob_len + .try_into() + .ok() + .and_then(|blob_len| (input.split_at_checked(blob_len))) + .unwrap_or_else(|| (&input[..], &[][..])), + _ => { + log::debug!(target: LOG_TARGET, "Failed to extract polkavm blob length"); + (&input[..], &[][..]) + }, + }; - // Get the dispatch info of the call. - let dispatch_call: ::RuntimeCall = - crate::Call::::instantiate_with_code { - value: native_value, - gas_limit: result.gas_required, - storage_deposit_limit: result.storage_deposit, - code: code.to_vec(), - data: data.to_vec(), - salt: None, - } - .into(); - (result, dispatch_call.get_dispatch_info()) - }, - }; + // Dry run the call. + let result = crate::Pallet::::bare_instantiate( + T::RuntimeOrigin::signed(origin), + value, + gas_limit, + storage_deposit_limit, + Code::Upload(code.to_vec()), + data.to_vec(), + None, + debug, + collect_events, + ); - // The transaction fees depend on the extrinsic's length, which in turn is influenced by - // the encoded length of the gas limit specified in the transaction (tx.gas). 
- // We iteratively compute the fee by adjusting tx.gas until the fee stabilizes. - // with a maximum of 3 iterations to avoid an infinite loop. - for _ in 0..3 { - let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else { - log::debug!(target: LOG_TARGET, "Failed to convert to unsigned"); - return Err(EthTransactError::Message("Invalid transaction".into())); + // Get the encoded size of the transaction. + let tx = TransactionLegacyUnsigned { + gas: max_gas_fee.into(), + nonce: nonce.into(), + value: value.into().saturating_mul(T::NativeToEthRatio::get().into()), + input: input.clone().into(), + gas_price: GAS_PRICE.into(), + chain_id: Some(T::ChainId::get().into()), + ..Default::default() }; - let eth_dispatch_call = crate::Call::::eth_transact { - payload: unsigned_tx.dummy_signed_payload(), + payload: tx.dummy_signed_payload(), gas_limit: result.gas_required, - storage_deposit_limit: result.storage_deposit, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), }; let encoded_len = utx_encoded_size(eth_dispatch_call); + + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = + crate::Call::::instantiate_with_code { + value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + code: code.to_vec(), + data: data.to_vec(), + salt: None, + } + .into(); + let dispatch_info = dispatch_call.get_dispatch_info(); + + // Compute the fee. 
let fee = pallet_transaction_payment::Pallet::::compute_fee( encoded_len, &dispatch_info, 0u32.into(), ) .into(); - let eth_gas: U256 = (fee / GAS_PRICE.into()).into(); - if eth_gas == result.eth_gas { - log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}"); - break; + log::trace!(target: LOG_TARGET, "bare_eth_call: len: {encoded_len:?} fee: {fee:?}"); + EthContractResult { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + result: result.result.map(|v| v.result.data), + fee, } - result.eth_gas = eth_gas; - tx.gas = Some(eth_gas.into()); - log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}"); } - - Ok(result) - } - - /// Get the balance with EVM decimals of the given `address`. - pub fn evm_balance(address: &H160) -> U256 { - let account = T::AddressMapper::to_account_id(&address); - Self::convert_native_to_evm(T::Currency::reducible_balance(&account, Preserve, Polite)) } /// A generalized version of [`Self::upload_code`]. @@ -1447,7 +1398,7 @@ where storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { let origin = T::UploadOrigin::ensure_origin(origin)?; - let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, false)?; + let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) } @@ -1465,10 +1416,9 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: BalanceOf, - skip_transfer: bool, ) -> Result<(WasmBlob, BalanceOf), DispatchError> { let mut module = WasmBlob::from_code(code, origin)?; - let deposit = module.store_code(skip_transfer)?; + let deposit = module.store_code()?; ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); Ok((module, deposit)) } @@ -1491,25 +1441,6 @@ where .and_then(|r| r) }) } - - /// Convert a native balance to EVM balance. 
- fn convert_native_to_evm(value: BalanceOf) -> U256 { - value.into().saturating_mul(T::NativeToEthRatio::get().into()) - } - - /// Convert an EVM balance to a native balance. - fn convert_evm_to_native(value: U256) -> Result, Error> { - if value.is_zero() { - return Ok(Zero::zero()) - } - let ratio = T::NativeToEthRatio::get().into(); - let res = value.checked_div(ratio).expect("divisor is non-zero; qed"); - if res.saturating_mul(ratio) == value { - res.try_into().map_err(|_| Error::::BalanceConversionFailed) - } else { - Err(Error::::DecimalPrecisionLoss) - } - } } impl Pallet { @@ -1537,8 +1468,8 @@ sp_api::decl_runtime_apis! { BlockNumber: Codec, EventRecord: Codec, { - /// Returns the free balance of the given `[H160]` address, using EVM decimals. - fn balance(address: H160) -> U256; + /// Returns the free balance of the given `[H160]` address. + fn balance(address: H160) -> Balance; /// Returns the nonce of the given `[H160]` address. fn nonce(address: H160) -> Nonce; @@ -1572,7 +1503,14 @@ sp_api::decl_runtime_apis! { /// Perform an Ethereum call. /// /// See [`crate::Pallet::bare_eth_transact`] - fn eth_transact(tx: GenericTransaction) -> Result, EthTransactError>; + fn eth_transact( + origin: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> EthContractResult; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 3b55106c67d8..64e66382b9ab 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -47,7 +47,7 @@ pub const NUM_EVENT_TOPICS: u32 = 4; pub const DELEGATE_DEPENDENCIES: u32 = 32; /// Maximum size of events (including topics) and storage values. -pub const PAYLOAD_BYTES: u32 = 448; +pub const PAYLOAD_BYTES: u32 = 512; /// The maximum size of the transient storage in bytes. 
/// @@ -116,10 +116,7 @@ pub mod code { const BASIC_BLOCK_SIZE: u32 = 1000; /// Make sure that the various program parts are within the defined limits. - pub fn enforce( - blob: Vec, - available_syscalls: &[&[u8]], - ) -> Result { + pub fn enforce(blob: Vec) -> Result { fn round_page(n: u32) -> u64 { // performing the rounding in u64 in order to prevent overflow u64::from(n).next_multiple_of(PAGE_SIZE.into()) @@ -132,56 +129,23 @@ pub mod code { Error::::CodeRejected })?; - if !program.is_64_bit() { - log::debug!(target: LOG_TARGET, "32bit programs are not supported."); - Err(Error::::CodeRejected)?; - } - - // Need to check that no non-existent syscalls are used. This allows us to add - // new syscalls later without affecting already deployed code. - for (idx, import) in program.imports().iter().enumerate() { - // We are being defensive in case an attacker is able to somehow include - // a lot of imports. This is important because we search the array of host - // functions for every import. - if idx == available_syscalls.len() { - log::debug!(target: LOG_TARGET, "Program contains too many imports."); - Err(Error::::CodeRejected)?; - } - let Some(import) = import else { - log::debug!(target: LOG_TARGET, "Program contains malformed import."); - return Err(Error::::CodeRejected.into()); - }; - if !available_syscalls.contains(&import.as_bytes()) { - log::debug!(target: LOG_TARGET, "Program references unknown syscall: {}", import); - Err(Error::::CodeRejected)?; - } - } - // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. 
- use polkavm::program::ISA64_V1 as ISA; + use polkavm::program::ISA32_V1_NoSbrk as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { - use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - match inst.kind { - Instruction::invalid => { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) - }, - Instruction::sbrk(_, _) => { - log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) - }, - _ => (), + if matches!(inst.kind, polkavm::program::Instruction::invalid) { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) } } diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index a7127f812b4b..af0100d59cbe 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -17,8 +17,8 @@ //! A crate that hosts a common definitions that are relevant for the pallet-revive. -use crate::{H160, U256}; -use alloc::{string::String, vec::Vec}; +use crate::H160; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_revive_uapi::ReturnFlags; @@ -28,30 +28,6 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum DepositLimit { - /// Allows bypassing all balance transfer checks. - Unchecked, - - /// Specifies a maximum allowable balance for a deposit. 
- Balance(Balance), -} - -impl DepositLimit { - pub fn is_unchecked(&self) -> bool { - match self { - Self::Unchecked => true, - _ => false, - } - } -} - -impl From for DepositLimit { - fn from(value: T) -> Self { - Self::Balance(value) - } -} - /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. /// @@ -108,22 +84,15 @@ pub struct ContractResult { /// The result of the execution of a `eth_transact` call. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct EthTransactInfo { +pub struct EthContractResult { + /// The fee charged for the execution. + pub fee: Balance, /// The amount of gas that was necessary to execute the transaction. pub gas_required: Weight, /// Storage deposit charged. pub storage_deposit: Balance, - /// The weight and deposit equivalent in EVM Gas. - pub eth_gas: U256, - /// The execution return value. - pub data: Vec, -} - -/// Error type of a `eth_transact` call. -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum EthTransactError { - Data(Vec), - Message(String), + /// The execution result. + pub result: Result, DispatchError>, } /// Result type of a `bare_code_upload` call. diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index 6eddf048be98..712010bc8257 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -373,36 +373,24 @@ where } } - /// Create new storage meter without checking the limit. - pub fn new_unchecked(limit: BalanceOf) -> Self { - return Self { limit, ..Default::default() } - } - /// The total amount of deposit that should change hands as result of the execution /// that this meter was passed into. This will also perform all the charges accumulated /// in the whole contract stack. 
/// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. - pub fn try_into_deposit( - self, - origin: &Origin, - skip_transfer: bool, - ) -> Result, DispatchError> { - if !skip_transfer { - // Only refund or charge deposit if the origin is not root. - let origin = match origin { - Origin::Root => return Ok(Deposit::Charge(Zero::zero())), - Origin::Signed(o) => o, - }; - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } + pub fn try_into_deposit(self, origin: &Origin) -> Result, DispatchError> { + // Only refund or charge deposit if the origin is not root. + let origin = match origin { + Origin::Root => return Ok(Deposit::Charge(Zero::zero())), + Origin::Signed(o) => o, + }; + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; } - Ok(self.total_deposit) } } @@ -437,18 +425,13 @@ impl> RawMeter { contract: &T::AccountId, contract_info: &mut ContractInfo, code_info: &CodeInfo, - skip_transfer: bool, ) -> Result<(), DispatchError> { debug_assert!(matches!(self.contract_state(), ContractState::Alive)); // We need to make sure that the contract's account exists. 
let ed = Pallet::::min_balance(); self.total_deposit = Deposit::Charge(ed); - if skip_transfer { - T::Currency::set_balance(contract, ed); - } else { - T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; - } + T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; // A consumer is added at account creation and removed it on termination, otherwise the // runtime could remove the account. As long as a contract exists its account must exist. @@ -496,7 +479,6 @@ impl> RawMeter { } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { - log::debug!( target: LOG_TARGET, "Storage deposit limit exhausted: {:?} > {:?}", amount, self.limit); return Err(>::StorageDepositLimitExhausted.into()) } } @@ -829,10 +811,7 @@ mod tests { nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - assert_eq!( - meter.try_into_deposit(&test_case.origin, false).unwrap(), - test_case.deposit - ); + assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); assert_eq!(nested0_info.extra_deposit(), 112); assert_eq!(nested1_info.extra_deposit(), 110); @@ -903,10 +882,7 @@ mod tests { nested0.absorb(nested1, &CHARLIE, None); meter.absorb(nested0, &BOB, None); - assert_eq!( - meter.try_into_deposit(&test_case.origin, false).unwrap(), - test_case.deposit - ); + assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index 8ba5e7384070..e64f58894432 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -18,8 +18,7 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, - DebugInfo, DepositLimit, EventRecordOf, 
ExecReturnValue, InstantiateReturnValue, OriginFor, - Pallet, Weight, + DebugInfo, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -134,7 +133,7 @@ builder!( origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: DepositLimit>, + storage_deposit_limit: BalanceOf, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -160,7 +159,7 @@ builder!( origin, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), + storage_deposit_limit: deposit_limit::(), code, data: vec![], salt: Some([0; 32]), @@ -199,7 +198,7 @@ builder!( dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: DepositLimit>, + storage_deposit_limit: BalanceOf, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -217,7 +216,7 @@ builder!( dest, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), + storage_deposit_limit: deposit_limit::(), data: vec![], debug: DebugInfo::UnsafeDebug, collect_events: CollectEvents::Skip, diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 664578bf7672..a35e4d908601 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -29,7 +29,6 @@ use crate::{ ChainExtension, Environment, Ext, RegisteredChainExtension, Result as ExtensionResult, RetVal, ReturnFlags, }, - evm::{runtime::GAS_PRICE, GenericTransaction}, exec::Key, limits, primitives::CodeUploadReturnValue, @@ -39,8 +38,8 @@ use crate::{ wasm::Memory, weights::WeightInfo, AccountId32Mapper, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, - ContractInfoOf, DebugInfo, DeletionQueueCounter, DepositLimit, Error, EthTransactError, - HoldReason, Origin, Pallet, PristineCode, H160, + ContractInfoOf, DebugInfo, DeletionQueueCounter, Error, HoldReason, 
Origin, Pallet, + PristineCode, H160, }; use crate::test_utils::builder::Contract; @@ -374,7 +373,7 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - Weight::from_parts(2 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); pub static ExistentialDeposit: u64 = 1; } @@ -382,7 +381,6 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockWeights = BlockWeights; type AccountId = AccountId32; type Lookup = IdentityLookup; type AccountData = pallet_balances::AccountData; @@ -418,7 +416,6 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -439,7 +436,7 @@ parameter_types! { pub static DepositPerByte: BalanceOf = 1; pub const DepositPerItem: BalanceOf = 2; pub static CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); - pub static ChainId: u64 = 448; + pub static ChainId: u64 = 384; } impl Convert> for Test { @@ -1130,7 +1127,7 @@ fn deploy_and_call_other_contract() { #[test] fn delegate_call() { let (caller_wasm, _caller_code_hash) = compile_module("delegate_call").unwrap(); - let (callee_wasm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); + let (callee_wasm, callee_code_hash) = compile_module("delegate_call_lib").unwrap(); ExtBuilder::default().existential_deposit(500).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); @@ -1140,88 +1137,12 @@ fn delegate_call() { builder::bare_instantiate(Code::Upload(caller_wasm)) .value(300_000) .build_and_unwrap_contract(); - - // Instantiate the 'callee' - let Contract { addr: callee_addr, .. 
} = - builder::bare_instantiate(Code::Upload(callee_wasm)) - .value(100_000) - .build_and_unwrap_contract(); - - assert_ok!(builder::call(caller_addr) - .value(1337) - .data((callee_addr, 0u64, 0u64).encode()) - .build()); - }); -} - -#[test] -fn delegate_call_with_weight_limit() { - let (caller_wasm, _caller_code_hash) = compile_module("delegate_call").unwrap(); - let (callee_wasm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); - - ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Instantiate the 'caller' - let Contract { addr: caller_addr, .. } = - builder::bare_instantiate(Code::Upload(caller_wasm)) - .value(300_000) - .build_and_unwrap_contract(); - - // Instantiate the 'callee' - let Contract { addr: callee_addr, .. } = - builder::bare_instantiate(Code::Upload(callee_wasm)) - .value(100_000) - .build_and_unwrap_contract(); - - // fails, not enough weight - assert_err!( - builder::bare_call(caller_addr) - .value(1337) - .data((callee_addr, 100u64, 100u64).encode()) - .build() - .result, - Error::::ContractTrapped, - ); + // Only upload 'callee' code + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), callee_wasm, 100_000,)); assert_ok!(builder::call(caller_addr) .value(1337) - .data((callee_addr, 500_000_000u64, 100_000u64).encode()) - .build()); - }); -} - -#[test] -fn delegate_call_with_deposit_limit() { - let (caller_pvm, _caller_code_hash) = compile_module("delegate_call_deposit_limit").unwrap(); - let (callee_pvm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); - - ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Instantiate the 'caller' - let Contract { addr: caller_addr, .. 
} = - builder::bare_instantiate(Code::Upload(caller_pvm)) - .value(300_000) - .build_and_unwrap_contract(); - - // Instantiate the 'callee' - let Contract { addr: callee_addr, .. } = - builder::bare_instantiate(Code::Upload(callee_pvm)) - .value(100_000) - .build_and_unwrap_contract(); - - // Delegate call will write 1 storage and deposit of 2 (1 item) + 32 (bytes) is required. - // Fails, not enough deposit - let ret = builder::bare_call(caller_addr) - .value(1337) - .data((callee_addr, 33u64).encode()) - .build_and_unwrap_result(); - assert_return_code!(ret, RuntimeReturnCode::OutOfResources); - - assert_ok!(builder::call(caller_addr) - .value(1337) - .data((callee_addr, 34u64).encode()) + .data(callee_code_hash.as_ref().to_vec()) .build()); }); } @@ -1248,7 +1169,7 @@ fn transfer_expendable_cannot_kill_account() { test_utils::contract_info_storage_deposit(&addr) ); - // Some or the total balance is held, so it can't be transferred. + // Some ot the total balance is held, so it can't be transferred. assert_err!( <::Currency as Mutate>::transfer( &account, @@ -1677,8 +1598,8 @@ fn instantiate_return_code() { // Contract has enough balance but the passed code hash is invalid ::Currency::set_balance(&contract.account_id, min_balance + 10_000); - let result = builder::bare_call(contract.addr).data(vec![0; 33]).build(); - assert_err!(result.result, >::CodeNotFound); + let result = builder::bare_call(contract.addr).data(vec![0; 33]).build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. 
let result = builder::bare_call(contract.addr) @@ -1875,27 +1796,6 @@ fn lazy_batch_removal_works() { }); } -#[test] -fn ref_time_left_api_works() { - let (code, _) = compile_module("ref_time_left").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor calls ref_time_left twice and asserts it to decrease - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It echoes back the ref_time returned by the ref_time_left API. - let received = builder::bare_call(addr).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - - let returned_value = u64::from_le_bytes(received.data[..8].try_into().unwrap()); - assert!(returned_value > 0); - assert!(returned_value < GAS_LIMIT.ref_time()); - }); -} - #[test] fn lazy_removal_partial_remove_works() { let (code, _hash) = compile_module("self_destruct").unwrap(); @@ -2310,7 +2210,7 @@ fn gas_estimation_for_subcalls() { // Make the same call using the estimated gas. Should succeed. 
let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) .data(input.clone()) .build(); assert_ok!(&result.result); @@ -2318,7 +2218,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little ref_time let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_ref_time(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -2326,7 +2226,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little proof_size let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_proof_size(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -3483,11 +3383,13 @@ fn deposit_limit_in_nested_calls() { // nested call. This should fail as callee adds up 2 bytes to the storage, meaning // that the nested call should have a deposit limit of at least 2 Balance. The // sub-call should be rolled back, which is covered by the next test case. - let ret = builder::bare_call(addr_caller) - .storage_deposit_limit(DepositLimit::Balance(16)) - .data((102u32, &addr_callee, U256::from(1u64)).encode()) - .build_and_unwrap_result(); - assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .storage_deposit_limit(16) + .data((102u32, &addr_callee, U256::from(1u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); // Refund in the callee contract but not enough to cover the 14 Balance required by the // caller. 
Note that if previous sub-call wouldn't roll back, this call would pass @@ -3503,11 +3405,13 @@ fn deposit_limit_in_nested_calls() { let _ = ::Currency::set_balance(&ALICE, 511); // Require more than the sender's balance. - // Limit the sub call to little balance so it should fail in there - let ret = builder::bare_call(addr_caller) - .data((448, &addr_callee, U256::from(1u64)).encode()) - .build_and_unwrap_result(); - assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + // We don't set a special limit for the nested call. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data((512u32, &addr_callee, U256::from(1u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); // Same as above but allow for the additional deposit of 1 Balance in parent. // We set the special deposit limit of 1 Balance for the nested call, which isn't @@ -3579,12 +3483,14 @@ fn deposit_limit_in_nested_instantiate() { // Now we set enough limit in parent call, but an insufficient limit for child // instantiate. This should fail during the charging for the instantiation in // `RawMeter::charge_instantiate()` - let ret = builder::bare_call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 2)) - .data((0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)).encode()) - .build_and_unwrap_result(); - assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(callee_info_len + 2 + ED + 2) + .data((0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); // The charges made on the instantiation should be rolled back. 
assert_eq!(::Currency::free_balance(&BOB), 1_000_000); @@ -3592,19 +3498,21 @@ fn deposit_limit_in_nested_instantiate() { // item of 1 byte to be covered by the limit, which implies 3 more Balance. // Now we set enough limit for the parent call, but insufficient limit for child // instantiate. This should fail right after the constructor execution. - let ret = builder::bare_call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 3)) // enough parent limit - .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)).encode()) - .build_and_unwrap_result(); - assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(callee_info_len + 2 + ED + 3) // enough parent limit + .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); // The charges made on the instantiation should be rolled back. assert_eq!(::Currency::free_balance(&BOB), 1_000_000); // Set enough deposit limit for the child instantiate. This should succeed. let result = builder::bare_call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit((callee_info_len + 2 + ED + 4 + 2).into()) + .storage_deposit_limit(callee_info_len + 2 + ED + 4 + 2) .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3 + 2)).encode()) .build(); @@ -3758,12 +3666,6 @@ fn locking_delegate_dependency_works() { .map(|c| sp_core::H256(sp_io::hashing::keccak_256(c))) .collect(); - let hash2addr = |code_hash: &H256| { - let mut addr = H160::zero(); - addr.as_bytes_mut().copy_from_slice(&code_hash.as_ref()[..20]); - addr - }; - // Define inputs with various actions to test locking / unlocking delegate_dependencies. // See the contract for more details. 
let noop_input = (0u32, callee_hashes[0]); @@ -3773,19 +3675,17 @@ fn locking_delegate_dependency_works() { // Instantiate the caller contract with the given input. let instantiate = |input: &(u32, H256)| { - let (action, code_hash) = input; builder::bare_instantiate(Code::Upload(wasm_caller.clone())) .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) - .data((action, hash2addr(code_hash), code_hash).encode()) + .data(input.encode()) .build() }; // Call contract with the given input. let call = |addr_caller: &H160, input: &(u32, H256)| { - let (action, code_hash) = input; builder::bare_call(*addr_caller) .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) - .data((action, hash2addr(code_hash), code_hash).encode()) + .data(input.encode()) .build() }; const ED: u64 = 2000; @@ -3802,7 +3702,7 @@ fn locking_delegate_dependency_works() { // Upload all the delegated codes (they all have the same size) let mut deposit = Default::default(); for code in callee_codes.iter() { - let CodeUploadReturnValue { deposit: deposit_per_code, code_hash } = + let CodeUploadReturnValue { deposit: deposit_per_code, .. } = Contracts::bare_upload_code( RuntimeOrigin::signed(ALICE_FALLBACK), code.clone(), @@ -3810,9 +3710,6 @@ fn locking_delegate_dependency_works() { ) .unwrap(); deposit = deposit_per_code; - // Mock contract info by using first 20 bytes of code_hash as address. - let addr = hash2addr(&code_hash); - ContractInfoOf::::set(&addr, ContractInfo::new(&addr, 0, code_hash).ok()); } // Instantiate should now work. @@ -3849,11 +3746,7 @@ fn locking_delegate_dependency_works() { // Locking self should fail. 
assert_err!( - builder::bare_call(addr_caller) - .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) - .data((1u32, &addr_caller, self_code_hash).encode()) - .build() - .result, + call(&addr_caller, &(1u32, self_code_hash)).result, Error::::CannotAddSelfAsDelegateDependency ); @@ -3891,8 +3784,8 @@ fn locking_delegate_dependency_works() { // Locking a dependency with a storage limit too low should fail. assert_err!( builder::bare_call(addr_caller) - .storage_deposit_limit((dependency_deposit - 1).into()) - .data((1u32, hash2addr(&callee_hashes[0]), callee_hashes[0]).encode()) + .storage_deposit_limit(dependency_deposit - 1) + .data(lock_delegate_dependency_input.encode()) .build() .result, Error::::StorageDepositLimitExhausted @@ -3902,7 +3795,7 @@ fn locking_delegate_dependency_works() { assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0])); // Calling should fail since the delegated contract is not on chain anymore. - assert_err!(call(&addr_caller, &noop_input).result, Error::::CodeNotFound); + assert_err!(call(&addr_caller, &noop_input).result, Error::::ContractTrapped); // Add the dependency back. Contracts::upload_code( @@ -4364,80 +4257,6 @@ fn create1_with_value_works() { }); } -#[test] -fn gas_price_api_works() { - let (code, _) = compile_module("gas_price").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It echoes back the value returned by the gas price API. 
- let received = builder::bare_call(addr).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(u64::from_le_bytes(received.data[..].try_into().unwrap()), u64::from(GAS_PRICE)); - }); -} - -#[test] -fn base_fee_api_works() { - let (code, _) = compile_module("base_fee").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It echoes back the value returned by the base fee API. - let received = builder::bare_call(addr).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(received.data[..].try_into().unwrap()), U256::zero()); - }); -} - -#[test] -fn call_data_size_api_works() { - let (code, _) = compile_module("call_data_size").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It echoes back the value returned by the call data size API. 
- let received = builder::bare_call(addr).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(u64::from_le_bytes(received.data.try_into().unwrap()), 0); - - let received = builder::bare_call(addr).data(vec![1; 256]).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(u64::from_le_bytes(received.data.try_into().unwrap()), 256); - }); -} - -#[test] -fn call_data_copy_api_works() { - let (code, _) = compile_module("call_data_copy").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call fixture: Expects an input of [255; 32] and executes tests. - assert_ok!(builder::call(addr).data(vec![255; 32]).build()); - }); -} - #[test] fn static_data_limit_is_enforced() { let (oom_rw_trailing, _) = compile_module("oom_rw_trailing").unwrap(); @@ -4503,48 +4322,6 @@ fn chain_id_works() { }); } -#[test] -fn call_data_load_api_works() { - let (code, _) = compile_module("call_data_load").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It reads a byte for the offset and then returns - // what call data load returned using this byte as the offset. 
- let input = (3u8, U256::max_value(), U256::max_value()).encode(); - let received = builder::bare_call(addr).data(input).build().result.unwrap(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(&received.data), U256::max_value()); - - // Edge case - let input = (2u8, U256::from(255).to_big_endian()).encode(); - let received = builder::bare_call(addr).data(input).build().result.unwrap(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(&received.data), U256::from(65280)); - - // Edge case - let received = builder::bare_call(addr).data(vec![1]).build().result.unwrap(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(&received.data), U256::zero()); - - // OOB case - let input = (42u8).encode(); - let received = builder::bare_call(addr).data(input).build().result.unwrap(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(&received.data), U256::zero()); - - // No calldata should return the zero value - let received = builder::bare_call(addr).build().result.unwrap(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!(U256::from_little_endian(&received.data), U256::zero()); - }); -} - #[test] fn return_data_api_works() { let (code_return_data_api, _) = compile_module("return_data_api").unwrap(); @@ -4794,153 +4571,3 @@ fn mapped_address_works() { assert_eq!(::Currency::total_balance(&EVE), 1_100); }); } - -#[test] -fn skip_transfer_works() { - let (code_caller, _) = compile_module("call").unwrap(); - let (code, _) = compile_module("set_empty_storage").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - ::Currency::set_balance(&ALICE, 1_000_000); - ::Currency::set_balance(&BOB, 0); - - // fails to instantiate when gas is specified. 
- assert_err!( - Pallet::::bare_eth_transact( - GenericTransaction { - from: Some(BOB_ADDR), - input: Some(code.clone().into()), - gas: Some(1u32.into()), - ..Default::default() - }, - Weight::MAX, - |_| 0u32 - ), - EthTransactError::Message(format!( - "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" - )) - ); - - // works when no gas is specified. - assert_ok!(Pallet::::bare_eth_transact( - GenericTransaction { - from: Some(ALICE_ADDR), - input: Some(code.clone().into()), - ..Default::default() - }, - Weight::MAX, - |_| 0u32 - )); - - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - let Contract { addr: caller_addr, .. } = - builder::bare_instantiate(Code::Upload(code_caller)).build_and_unwrap_contract(); - - // fails to call when gas is specified. - assert_err!( - Pallet::::bare_eth_transact( - GenericTransaction { - from: Some(BOB_ADDR), - to: Some(addr), - gas: Some(1u32.into()), - ..Default::default() - }, - Weight::MAX, - |_| 0u32 - ), - EthTransactError::Message(format!( - "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" - )) - ); - - // fails when calling from a contract when gas is specified. - assert_err!( - Pallet::::bare_eth_transact( - GenericTransaction { - from: Some(BOB_ADDR), - to: Some(caller_addr), - input: Some((0u32, &addr).encode().into()), - gas: Some(1u32.into()), - ..Default::default() - }, - Weight::MAX, - |_| 0u32 - ), - EthTransactError::Message(format!("insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)")) - ); - - // works when no gas is specified. - assert_ok!(Pallet::::bare_eth_transact( - GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), ..Default::default() }, - Weight::MAX, - |_| 0u32 - )); - - // works when calling from a contract when no gas is specified. 
- assert_ok!(Pallet::::bare_eth_transact( - GenericTransaction { - from: Some(BOB_ADDR), - to: Some(caller_addr), - input: Some((0u32, &addr).encode().into()), - ..Default::default() - }, - Weight::MAX, - |_| 0u32 - )); - }); -} - -#[test] -fn gas_limit_api_works() { - let (code, _) = compile_module("gas_limit").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Create fixture: Constructor does nothing - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - // Call the contract: It echoes back the value returned by the gas limit API. - let received = builder::bare_call(addr).build_and_unwrap_result(); - assert_eq!(received.flags, ReturnFlags::empty()); - assert_eq!( - u64::from_le_bytes(received.data[..].try_into().unwrap()), - ::BlockWeights::get().max_block.ref_time() - ); - }); -} - -#[test] -fn unknown_syscall_rejected() { - let (code, _) = compile_module("unknown_syscall").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - ::Currency::set_balance(&ALICE, 1_000_000); - - assert_err!( - builder::bare_instantiate(Code::Upload(code)).build().result, - >::CodeRejected, - ) - }); -} - -#[test] -fn unstable_interface_rejected() { - let (code, _) = compile_module("unstable_interface").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - ::Currency::set_balance(&ALICE, 1_000_000); - - Test::set_unstable_interface(false); - assert_err!( - builder::bare_instantiate(Code::Upload(code.clone())).build().result, - >::CodeRejected, - ); - - Test::set_unstable_interface(true); - assert_ok!(builder::bare_instantiate(Code::Upload(code)).build().result); - }); -} diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs index c9e19e52ace1..7c4fbba71f65 100644 --- 
a/substrate/frame/revive/src/tests/test_debug.rs +++ b/substrate/frame/revive/src/tests/test_debug.rs @@ -21,7 +21,6 @@ use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, primitives::ExecReturnValue, test_utils::*, - DepositLimit, }; use frame_support::traits::Currency; use pretty_assertions::assert_eq; @@ -115,7 +114,7 @@ fn debugging_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - DepositLimit::Balance(deposit_limit::()), + deposit_limit::(), Code::Upload(wasm), vec![], Some([0u8; 32]), @@ -199,7 +198,7 @@ fn call_interception_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::().into(), + deposit_limit::(), Code::Upload(wasm), vec![], // some salt to ensure that the address of this contract is unique among all tests diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index b24de61314f9..f10c4f5fddf8 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -23,10 +23,13 @@ mod runtime; #[cfg(doc)] pub use crate::wasm::runtime::SyscallDoc; +#[cfg(test)] +pub use runtime::HIGHEST_API_VERSION; + #[cfg(feature = "runtime-benchmarks")] pub use crate::wasm::runtime::{ReturnData, TrapReason}; -pub use crate::wasm::runtime::{Memory, Runtime, RuntimeCosts}; +pub use crate::wasm::runtime::{ApiVersion, Memory, Runtime, RuntimeCosts}; use crate::{ address::AddressMapper, @@ -36,7 +39,7 @@ use crate::{ storage::meter::Diff, weights::WeightInfo, AccountIdOf, BadOrigin, BalanceOf, CodeInfoOf, CodeVec, Config, Error, Event, ExecError, - HoldReason, Pallet, PristineCode, Weight, LOG_TARGET, + HoldReason, Pallet, PristineCode, Weight, API_VERSION, LOG_TARGET, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -84,6 +87,11 @@ pub struct CodeInfo { refcount: u64, /// Length of the code in bytes. code_len: u32, + /// The API version that this contract operates under. 
+ /// + /// This determines which host functions are available to the contract. This + /// prevents that new host functions become available to already deployed contracts. + api_version: u16, /// The behaviour version that this contract operates under. /// /// Whenever any observeable change (with the exception of weights) are made we need @@ -91,7 +99,7 @@ pub struct CodeInfo { /// exposing the old behaviour depending on the set behaviour version of the contract. /// /// As of right now this is a reserved field that is always set to 0. - behaviour_version: u32, + behaviour_version: u16, } impl ExportedFunction { @@ -122,10 +130,9 @@ where { /// We only check for size and nothing else when the code is uploaded. pub fn from_code(code: Vec, owner: AccountIdOf) -> Result { - // We do validation only when new code is deployed. This allows us to increase + // We do size checks when new code is deployed. This allows us to increase // the limits later without affecting already deployed code. - let available_syscalls = runtime::list_syscalls(T::UnsafeUnstableInterface::get()); - let code = limits::code::enforce::(code, available_syscalls)?; + let code = limits::code::enforce::(code)?; let code_len = code.len() as u32; let bytes_added = code_len.saturating_add(>::max_encoded_len() as u32); @@ -137,6 +144,7 @@ where deposit, refcount: 0, code_len, + api_version: API_VERSION, behaviour_version: Default::default(), }; let code_hash = H256(sp_io::hashing::keccak_256(&code)); @@ -175,7 +183,7 @@ where } /// Puts the module blob into storage, and returns the deposit collected for the storage. - pub fn store_code(&mut self, skip_transfer: bool) -> Result, Error> { + pub fn store_code(&mut self) -> Result, Error> { let code_hash = *self.code_hash(); >::mutate(code_hash, |stored_code_info| { match stored_code_info { @@ -187,16 +195,15 @@ where // the `owner` is always the origin of the current transaction. 
None => { let deposit = self.code_info.deposit; - - if !skip_transfer { - T::Currency::hold( + T::Currency::hold( &HoldReason::CodeUploadDepositReserve.into(), &self.code_info.owner, deposit, - ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); + ) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); >::StorageDepositNotEnoughFunds })?; - } self.code_info.refcount = 0; >::insert(code_hash, &self.code); @@ -222,6 +229,7 @@ impl CodeInfo { deposit: Default::default(), refcount: 0, code_len: 0, + api_version: API_VERSION, behaviour_version: Default::default(), } } @@ -242,7 +250,7 @@ impl CodeInfo { } /// Returns the code length. - pub fn code_len(&self) -> u64 { + pub fn code_len(&self) -> U256 { self.code_len.into() } } @@ -251,6 +259,7 @@ pub struct PreparedCall<'a, E: Ext> { module: polkavm::Module, instance: polkavm::RawInstance, runtime: Runtime<'a, E, polkavm::RawInstance>, + api_version: ApiVersion, } impl<'a, E: Ext> PreparedCall<'a, E> @@ -261,9 +270,12 @@ where pub fn call(mut self) -> ExecResult { let exec_result = loop { let interrupt = self.instance.run(); - if let Some(exec_result) = - self.runtime.handle_interrupt(interrupt, &self.module, &mut self.instance) - { + if let Some(exec_result) = self.runtime.handle_interrupt( + interrupt, + &self.module, + &mut self.instance, + self.api_version, + ) { break exec_result } }; @@ -277,18 +289,12 @@ impl WasmBlob { self, mut runtime: Runtime, entry_point: ExportedFunction, + api_version: ApiVersion, ) -> Result, ExecError> { let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - config.set_cache_enabled(false); - #[cfg(feature = "std")] - if std::env::var_os("REVIVE_USE_COMPILER").is_some() { - config.set_backend(Some(polkavm::BackendKind::Compiler)); - } - let engine = polkavm::Engine::new(&config).expect( - 
"on-chain (no_std) use of interpreter is hard coded. - interpreter is available on all plattforms; qed", - ); + let engine = + polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); let mut module_config = polkavm::ModuleConfig::new(); module_config.set_page_size(limits::PAGE_SIZE); @@ -300,15 +306,6 @@ impl WasmBlob { Error::::CodeRejected })?; - // This is checked at deploy time but we also want to reject pre-existing - // 32bit programs. - // TODO: Remove when we reset the test net. - // https://github.com/paritytech/contract-issues/issues/11 - if !module.is_64_bit() { - log::debug!(target: LOG_TARGET, "32bit programs are not supported."); - Err(Error::::CodeRejected)?; - } - let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) @@ -330,7 +327,7 @@ impl WasmBlob { instance.set_gas(gas_limit_polkavm); instance.prepare_call_untyped(entry_program_counter, &[]); - Ok(PreparedCall { module, instance, runtime }) + Ok(PreparedCall { module, instance, runtime, api_version }) } } @@ -351,7 +348,13 @@ where function: ExportedFunction, input_data: Vec, ) -> ExecResult { - let prepared_call = self.prepare_call(Runtime::new(ext, input_data), function)?; + let api_version = if ::UnsafeUnstableInterface::get() { + ApiVersion::UnsafeNewest + } else { + ApiVersion::Versioned(self.code_info.api_version) + }; + let prepared_call = + self.prepare_call(Runtime::new(ext, input_data), function, api_version)?; prepared_call.call() } diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 52f79f2eb55a..8310fe701013 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -19,7 +19,6 @@ use crate::{ address::AddressMapper, - evm::runtime::GAS_PRICE, exec::{ExecError, ExecResult, Ext, Key}, gas::{ChargedAmount, Token}, limits, @@ -28,7 +27,7 @@ use crate::{ Config, Error, 
LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -45,6 +44,14 @@ type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; +#[derive(Clone, Copy)] +pub enum ApiVersion { + /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. + UnsafeNewest, + /// Only expose API's up to and including the specified version. + Versioned(u16), +} + /// Abstraction over the memory access within syscalls. /// /// The reason for this abstraction is that we run syscalls on the host machine when @@ -66,13 +73,6 @@ pub trait Memory { /// - designated area is not within the bounds of the sandbox memory. fn write(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError>; - /// Zero the designated location in the sandbox memory. - /// - /// Returns `Err` if one of the following conditions occurs: - /// - /// - designated area is not within the bounds of the sandbox memory. - fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError>; - /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -126,13 +126,34 @@ pub trait Memory { /// /// # Note /// - /// Make sure to charge a proportional amount of weight if `len` is not fixed. + /// There must be an extra benchmark for determining the influence of `len` with + /// regard to the overall weight. 
fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } + + /// Reads and decodes a type with a size fixed at compile time from contract memory. + /// + /// # Only use on fixed size types + /// + /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise + /// this implementation will out of bound access the buffer declared by the guest. Some examples + /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded + /// integer. + /// + /// # Note + /// + /// The weight of reading a fixed value is included in the overall weight of any + /// contract callable function. + fn read_as(&self, ptr: u32) -> Result { + let buf = self.read(ptr, D::max_encoded_len() as u32)?; + let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) + .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) + } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -143,8 +164,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); - fn write_output(&mut self, output: u64); + fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); + fn write_output(&mut self, output: u32); } // Memory implementation used in benchmarking where guest memory is mapped into the host. 
@@ -170,10 +191,6 @@ impl Memory for [u8] { bound_checked.copy_from_slice(buf); Ok(()) } - - fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError> { - <[u8] as Memory>::write(self, ptr, &vec![0; len as usize]) - } } impl Memory for polkavm::RawInstance { @@ -186,10 +203,6 @@ impl Memory for polkavm::RawInstance { fn write(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError> { self.write_memory(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) } - - fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError> { - self.zero_memory(ptr, len).map_err(|_| Error::::OutOfBounds.into()) - } } impl PolkaVmInstance for polkavm::RawInstance { @@ -201,7 +214,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { + fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -212,7 +225,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u64) { + fn write_output(&mut self, output: u32) { self.set_reg(polkavm::Reg::A0, output); } } @@ -283,16 +296,8 @@ pub enum RuntimeCosts { CopyFromContract(u32), /// Weight charged for copying data to the sandbox. CopyToContract(u32), - /// Weight of calling `seal_call_data_load``. - CallDataLoad, - /// Weight of calling `seal_call_data_copy`. - CallDataCopy(u32), /// Weight of calling `seal_caller`. Caller, - /// Weight of calling `seal_call_data_size`. - CallDataSize, - /// Weight of calling `seal_return_data_size`. - ReturnDataSize, /// Weight of calling `seal_origin`. Origin, /// Weight of calling `seal_is_contract`. @@ -309,8 +314,6 @@ pub enum RuntimeCosts { CallerIsRoot, /// Weight of calling `seal_address`. Address, - /// Weight of calling `seal_ref_time_left`. - RefTimeLeft, /// Weight of calling `seal_weight_left`. WeightLeft, /// Weight of calling `seal_balance`. 
@@ -325,14 +328,8 @@ pub enum RuntimeCosts { BlockNumber, /// Weight of calling `seal_block_hash`. BlockHash, - /// Weight of calling `seal_gas_price`. - GasPrice, - /// Weight of calling `seal_base_fee`. - BaseFee, /// Weight of calling `seal_now`. Now, - /// Weight of calling `seal_gas_limit`. - GasLimit, /// Weight of calling `seal_weight_to_fee`. WeightToFee, /// Weight of calling `seal_terminate`, passing the number of locked dependencies. @@ -459,12 +456,8 @@ impl Token for RuntimeCosts { use self::RuntimeCosts::*; match *self { HostFn => cost_args!(noop_host_fn, 1), - CopyToContract(len) => T::WeightInfo::seal_copy_to_contract(len), + CopyToContract(len) => T::WeightInfo::seal_input(len), CopyFromContract(len) => T::WeightInfo::seal_return(len), - CallDataSize => T::WeightInfo::seal_call_data_size(), - ReturnDataSize => T::WeightInfo::seal_return_data_size(), - CallDataLoad => T::WeightInfo::seal_call_data_load(), - CallDataCopy(len) => T::WeightInfo::seal_call_data_copy(len), Caller => T::WeightInfo::seal_caller(), Origin => T::WeightInfo::seal_origin(), IsContract => T::WeightInfo::seal_is_contract(), @@ -474,7 +467,6 @@ impl Token for RuntimeCosts { CallerIsOrigin => T::WeightInfo::seal_caller_is_origin(), CallerIsRoot => T::WeightInfo::seal_caller_is_root(), Address => T::WeightInfo::seal_address(), - RefTimeLeft => T::WeightInfo::seal_ref_time_left(), WeightLeft => T::WeightInfo::seal_weight_left(), Balance => T::WeightInfo::seal_balance(), BalanceOf => T::WeightInfo::seal_balance_of(), @@ -482,10 +474,7 @@ impl Token for RuntimeCosts { MinimumBalance => T::WeightInfo::seal_minimum_balance(), BlockNumber => T::WeightInfo::seal_block_number(), BlockHash => T::WeightInfo::seal_block_hash(), - GasPrice => T::WeightInfo::seal_gas_price(), - BaseFee => T::WeightInfo::seal_base_fee(), Now => T::WeightInfo::seal_now(), - GasLimit => T::WeightInfo::seal_gas_limit(), WeightToFee => T::WeightInfo::seal_weight_to_fee(), Terminate(locked_dependencies) => 
T::WeightInfo::seal_terminate(locked_dependencies), DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len), @@ -547,17 +536,16 @@ macro_rules! charge_gas { /// The kind of call that should be performed. enum CallType { /// Execute another instantiated contract - Call { value_ptr: u32 }, - /// Execute another contract code in the context (storage, account ID, value) of the caller - /// contract - DelegateCall, + Call { callee_ptr: u32, value_ptr: u32, deposit_ptr: u32, weight: Weight }, + /// Execute deployed code in the context (storage, account ID, value) of the caller contract + DelegateCall { code_hash_ptr: u32 }, } impl CallType { fn cost(&self) -> RuntimeCosts { match self { CallType::Call { .. } => RuntimeCosts::CallBase, - CallType::DelegateCall => RuntimeCosts::DelegateCallBase, + CallType::DelegateCall { .. } => RuntimeCosts::DelegateCallBase, } } } @@ -583,6 +571,7 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { interrupt: Result, module: &polkavm::Module, instance: &mut M, + api_version: ApiVersion, ) -> Option { use polkavm::InterruptKind::*; @@ -602,7 +591,7 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { let Some(syscall_symbol) = module.imports().get(idx) else { return Some(Err(>::InvalidSyscall.into())); }; - match self.handle_ecall(instance, syscall_symbol.as_bytes()) { + match self.handle_ecall(instance, syscall_symbol.as_bytes(), api_version) { Ok(None) => None, Ok(Some(return_value)) => { instance.write_output(return_value); @@ -776,24 +765,29 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { Ok(()) } - /// Fallible conversion of a `ExecError` to `ReturnErrorCode`. - /// - /// This is used when converting the error returned from a subcall in order to decide - /// whether to trap the caller or allow handling of the error. 
- fn exec_error_into_return_code(from: ExecError) -> Result { - use crate::exec::ErrorOrigin::Callee; + /// Fallible conversion of `DispatchError` to `ReturnErrorCode`. + fn err_into_return_code(from: DispatchError) -> Result { use ReturnErrorCode::*; let transfer_failed = Error::::TransferFailed.into(); - let out_of_gas = Error::::OutOfGas.into(); - let out_of_deposit = Error::::StorageDepositLimitExhausted.into(); + let no_code = Error::::CodeNotFound.into(); + let not_found = Error::::ContractNotFound.into(); + + match from { + x if x == transfer_failed => Ok(TransferFailed), + x if x == no_code => Ok(CodeNotFound), + x if x == not_found => Ok(NotCallable), + err => Err(err), + } + } + + /// Fallible conversion of a `ExecError` to `ReturnErrorCode`. + fn exec_error_into_return_code(from: ExecError) -> Result { + use crate::exec::ErrorOrigin::Callee; - // errors in the callee do not trap the caller match (from.error, from.origin) { - (err, _) if err == transfer_failed => Ok(TransferFailed), - (err, Callee) if err == out_of_gas || err == out_of_deposit => Ok(OutOfResources), - (_, Callee) => Ok(CalleeTrapped), - (err, _) => Err(err), + (_, Callee) => Ok(ReturnErrorCode::CalleeTrapped), + (err, _) => Self::err_into_return_code(err), } } @@ -993,9 +987,6 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { memory: &mut M, flags: CallFlags, call_type: CallType, - callee_ptr: u32, - deposit_ptr: u32, - weight: Weight, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -1003,10 +994,6 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { ) -> Result { self.charge_gas(call_type.cost())?; - let callee = memory.read_h160(callee_ptr)?; - let deposit_limit = - if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? 
}; - let input_data = if flags.contains(CallFlags::CLONE_INPUT) { let input = self.input_data.as_ref().ok_or(Error::::InputForwarded)?; charge_gas!(self, RuntimeCosts::CallInputCloned(input.len() as u32))?; @@ -1019,7 +1006,13 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { }; let call_outcome = match call_type { - CallType::Call { value_ptr } => { + CallType::Call { callee_ptr, value_ptr, deposit_ptr, weight } => { + let callee = memory.read_h160(callee_ptr)?; + let deposit_limit = if deposit_ptr == SENTINEL { + U256::zero() + } else { + memory.read_u256(deposit_ptr)? + }; let read_only = flags.contains(CallFlags::READ_ONLY); let value = memory.read_u256(value_ptr)?; if value > 0u32.into() { @@ -1040,11 +1033,13 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { read_only, ) }, - CallType::DelegateCall => { + CallType::DelegateCall { code_hash_ptr } => { if flags.intersects(CallFlags::ALLOW_REENTRY | CallFlags::READ_ONLY) { return Err(Error::::InvalidCallFlags.into()); } - self.ext.delegate_call(weight, deposit_limit, callee, input_data) + + let code_hash = memory.read_h256(code_hash_ptr)?; + self.ext.delegate_call(code_hash, input_data) }, }; @@ -1158,18 +1153,14 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { #[define_env] pub mod env { /// Noop function used to benchmark the time it takes to execute an empty function. - /// - /// Marked as stable because it needs to be called from benchmarks even when the benchmarked - /// parachain has unstable functions disabled. #[cfg(feature = "runtime-benchmarks")] - #[stable] fn noop(&mut self, memory: &mut M) -> Result<(), TrapReason> { Ok(()) } /// Set the value at the given key in the contract storage. 
/// See [`pallet_revive_uapi::HostFn::set_storage_v2`] - #[stable] + #[api_version(0)] #[mutating] fn set_storage( &mut self, @@ -1183,9 +1174,23 @@ pub mod env { self.set_storage(memory, flags, key_ptr, key_len, value_ptr, value_len) } + /// Clear the value at the given key in the contract storage. + /// See [`pallet_revive_uapi::HostFn::clear_storage`] + #[api_version(0)] + #[mutating] + fn clear_storage( + &mut self, + memory: &mut M, + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.clear_storage(memory, flags, key_ptr, key_len) + } + /// Retrieve the value under the given key from storage. /// See [`pallet_revive_uapi::HostFn::get_storage`] - #[stable] + #[api_version(0)] fn get_storage( &mut self, memory: &mut M, @@ -1198,9 +1203,38 @@ pub mod env { self.get_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) } + /// Checks whether there is a value stored under the given key. + /// See [`pallet_revive_uapi::HostFn::contains_storage`] + #[api_version(0)] + fn contains_storage( + &mut self, + memory: &mut M, + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.contains_storage(memory, flags, key_ptr, key_len) + } + + /// Retrieve and remove the value under the given key from storage. + /// See [`pallet_revive_uapi::HostFn::take_storage`] + #[api_version(0)] + #[mutating] + fn take_storage( + &mut self, + memory: &mut M, + flags: u32, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { + self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) + } + /// Make a call to another contract. /// See [`pallet_revive_uapi::HostFn::call`]. 
- #[stable] + #[api_version(0)] fn call( &mut self, memory: &mut M, @@ -1218,10 +1252,12 @@ pub mod env { self.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::Call { value_ptr }, - callee_ptr, - deposit_ptr, - Weight::from_parts(ref_time_limit, proof_size_limit), + CallType::Call { + callee_ptr, + value_ptr, + deposit_ptr, + weight: Weight::from_parts(ref_time_limit, proof_size_limit), + }, input_data_ptr, input_data_len, output_ptr, @@ -1231,15 +1267,12 @@ pub mod env { /// Execute code in the context (storage, caller, value) of the current contract. /// See [`pallet_revive_uapi::HostFn::delegate_call`]. - #[stable] + #[api_version(0)] fn delegate_call( &mut self, memory: &mut M, flags: u32, - address_ptr: u32, - ref_time_limit: u64, - proof_size_limit: u64, - deposit_ptr: u32, + code_hash_ptr: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -1248,10 +1281,7 @@ pub mod env { self.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::DelegateCall, - address_ptr, - deposit_ptr, - Weight::from_parts(ref_time_limit, proof_size_limit), + CallType::DelegateCall { code_hash_ptr }, input_data_ptr, input_data_len, output_ptr, @@ -1261,7 +1291,7 @@ pub mod env { /// Instantiate a contract with the specified code hash. /// See [`pallet_revive_uapi::HostFn::instantiate`]. - #[stable] + #[api_version(0)] #[mutating] fn instantiate( &mut self, @@ -1293,83 +1323,32 @@ pub mod env { ) } - /// Returns the total size of the contract call input data. - /// See [`pallet_revive_uapi::HostFn::call_data_size `]. - #[stable] - fn call_data_size(&mut self, memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallDataSize)?; - Ok(self - .input_data - .as_ref() - .map(|input| input.len().try_into().expect("usize fits into u64; qed")) - .unwrap_or_default()) + /// Remove the calling account and transfer remaining **free** balance. + /// See [`pallet_revive_uapi::HostFn::terminate`]. 
+ #[api_version(0)] + #[mutating] + fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> { + self.terminate(memory, beneficiary_ptr) } /// Stores the input passed by the caller into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::call_data_copy`]. - #[stable] - fn call_data_copy( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len: u32, - offset: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::CallDataCopy(out_len))?; - - let Some(input) = self.input_data.as_ref() else { - return Err(Error::::InputForwarded.into()); - }; - - let start = offset as usize; - if start >= input.len() { - memory.zero(out_ptr, out_len)?; - return Ok(()); - } - - let end = start.saturating_add(out_len as usize).min(input.len()); - memory.write(out_ptr, &input[start..end])?; - - let bytes_written = (end - start) as u32; - memory.zero(out_ptr.saturating_add(bytes_written), out_len - bytes_written)?; - - Ok(()) - } - - /// Stores the U256 value at given call input `offset` into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::call_data_load`]. - #[stable] - fn call_data_load( - &mut self, - memory: &mut M, - out_ptr: u32, - offset: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::CallDataLoad)?; - - let Some(input) = self.input_data.as_ref() else { - return Err(Error::::InputForwarded.into()); - }; - - let mut data = [0; 32]; - let start = offset as usize; - let data = if start >= input.len() { - data // Any index is valid to request; OOB offsets return zero. + /// See [`pallet_revive_uapi::HostFn::input`]. 
+ #[api_version(0)] + fn input(&mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + if let Some(input) = self.input_data.take() { + self.write_sandbox_output(memory, out_ptr, out_len_ptr, &input, false, |len| { + Some(RuntimeCosts::CopyToContract(len)) + })?; + self.input_data = Some(input); + Ok(()) } else { - let end = start.saturating_add(32).min(input.len()); - data[..end - start].copy_from_slice(&input[start..end]); - data.reverse(); - data // Solidity expects right-padded data - }; - - self.write_fixed_sandbox_output(memory, out_ptr, &data, false, already_charged)?; - - Ok(()) + Err(Error::::InputForwarded.into()) + } } /// Cease contract execution and save a data buffer as a result of the execution. /// See [`pallet_revive_uapi::HostFn::return_value`]. - #[stable] + #[api_version(0)] fn seal_return( &mut self, memory: &mut M, @@ -1383,7 +1362,7 @@ pub mod env { /// Stores the address of the caller into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::caller`]. - #[stable] + #[api_version(0)] fn caller(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Caller)?; let caller = ::AddressMapper::to_address(self.ext.caller().account_id()?); @@ -1398,7 +1377,7 @@ pub mod env { /// Stores the address of the call stack origin into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::origin`]. - #[stable] + #[api_version(0)] fn origin(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Origin)?; let origin = ::AddressMapper::to_address(self.ext.origin().account_id()?); @@ -1411,9 +1390,18 @@ pub mod env { )?) } + /// Checks whether a specified address belongs to a contract. + /// See [`pallet_revive_uapi::HostFn::is_contract`]. 
+ #[api_version(0)] + fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result { + self.charge_gas(RuntimeCosts::IsContract)?; + let address = memory.read_h160(account_ptr)?; + Ok(self.ext.is_contract(&address) as u32) + } + /// Retrieve the code hash for a specified contract address. /// See [`pallet_revive_uapi::HostFn::code_hash`]. - #[stable] + #[api_version(0)] fn code_hash(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeHash)?; let address = memory.read_h160(addr_ptr)?; @@ -1428,16 +1416,53 @@ pub mod env { /// Retrieve the code size for a given contract address. /// See [`pallet_revive_uapi::HostFn::code_size`]. - #[stable] - fn code_size(&mut self, memory: &mut M, addr_ptr: u32) -> Result { + #[api_version(0)] + fn code_size(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeSize)?; let address = memory.read_h160(addr_ptr)?; - Ok(self.ext.code_size(&address)) + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &self.ext.code_size(&address).to_little_endian(), + false, + already_charged, + )?) + } + + /// Retrieve the code hash of the currently executing contract. + /// See [`pallet_revive_uapi::HostFn::own_code_hash`]. + #[api_version(0)] + fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::OwnCodeHash)?; + let code_hash = *self.ext.own_code_hash(); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + code_hash.as_bytes(), + false, + already_charged, + )?) + } + + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. 
+ #[api_version(0)] + fn caller_is_origin(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsOrigin)?; + Ok(self.ext.caller_is_origin() as u32) + } + + /// Checks whether the caller of the current contract is root. + /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. + #[api_version(0)] + fn caller_is_root(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsRoot)?; + Ok(self.ext.caller_is_root() as u32) } /// Stores the address of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::address`]. - #[stable] + #[api_version(0)] fn address(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Address)?; let address = self.ext.address(); @@ -1452,7 +1477,7 @@ pub mod env { /// Stores the price for the specified amount of weight into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::weight_to_fee`]. - #[stable] + #[api_version(0)] fn weight_to_fee( &mut self, memory: &mut M, @@ -1471,9 +1496,30 @@ pub mod env { )?) } + /// Stores the amount of weight left into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::weight_left`]. + #[api_version(0)] + fn weight_left( + &mut self, + memory: &mut M, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::WeightLeft)?; + let gas_left = &self.ext.gas_meter().gas_left().encode(); + Ok(self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + gas_left, + false, + already_charged, + )?) + } + /// Stores the immutable data into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::get_immutable_data`]. - #[stable] + #[api_version(0)] fn get_immutable_data( &mut self, memory: &mut M, @@ -1489,7 +1535,7 @@ pub mod env { /// Attaches the supplied immutable data to the currently executing contract. /// See [`pallet_revive_uapi::HostFn::set_immutable_data`]. 
- #[stable] + #[api_version(0)] fn set_immutable_data(&mut self, memory: &mut M, ptr: u32, len: u32) -> Result<(), TrapReason> { if len > limits::IMMUTABLE_BYTES { return Err(Error::::OutOfBounds.into()); @@ -1503,7 +1549,7 @@ pub mod env { /// Stores the *free* balance of the current account into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. - #[stable] + #[api_version(0)] fn balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Balance)?; Ok(self.write_fixed_sandbox_output( @@ -1517,7 +1563,7 @@ pub mod env { /// Stores the *free* balance of the supplied address into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. - #[stable] + #[api_version(0)] fn balance_of( &mut self, memory: &mut M, @@ -1537,7 +1583,7 @@ pub mod env { /// Returns the chain ID. /// See [`pallet_revive_uapi::HostFn::chain_id`]. - #[stable] + #[api_version(0)] fn chain_id(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { Ok(self.write_fixed_sandbox_output( memory, @@ -1548,17 +1594,9 @@ pub mod env { )?) } - /// Returns the block ref_time limit. - /// See [`pallet_revive_uapi::HostFn::gas_limit`]. - #[stable] - fn gas_limit(&mut self, memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::GasLimit)?; - Ok(::BlockWeights::get().max_block.ref_time()) - } - /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::value_transferred`]. - #[stable] + #[api_version(0)] fn value_transferred(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::ValueTransferred)?; Ok(self.write_fixed_sandbox_output( @@ -1570,37 +1608,29 @@ pub mod env { )?) } - /// Returns the simulated ethereum `GASPRICE` value. - /// See [`pallet_revive_uapi::HostFn::gas_price`]. 
- #[stable] - fn gas_price(&mut self, memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::GasPrice)?; - Ok(GAS_PRICE.into()) - } - - /// Returns the simulated ethereum `BASEFEE` value. - /// See [`pallet_revive_uapi::HostFn::base_fee`]. - #[stable] - fn base_fee(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::BaseFee)?; + /// Load the latest block timestamp into the supplied buffer + /// See [`pallet_revive_uapi::HostFn::now`]. + #[api_version(0)] + fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::Now)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &U256::zero().to_little_endian(), + &self.ext.now().to_little_endian(), false, already_charged, )?) } - /// Load the latest block timestamp into the supplied buffer - /// See [`pallet_revive_uapi::HostFn::now`]. - #[stable] - fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::Now)?; + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. + #[api_version(0)] + fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::MinimumBalance)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &self.ext.now().to_little_endian(), + &self.ext.minimum_balance().to_little_endian(), false, already_charged, )?) @@ -1608,7 +1638,7 @@ pub mod env { /// Deposit a contract event with the data buffer and optional list of topics. /// See [pallet_revive_uapi::HostFn::deposit_event] - #[stable] + #[api_version(0)] #[mutating] fn deposit_event( &mut self, @@ -1648,7 +1678,7 @@ pub mod env { /// Stores the current block number of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_number`]. 
- #[stable] + #[api_version(0)] fn block_number(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BlockNumber)?; Ok(self.write_fixed_sandbox_output( @@ -1662,7 +1692,7 @@ pub mod env { /// Stores the block hash at given block height into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_hash`]. - #[stable] + #[api_version(0)] fn block_hash( &mut self, memory: &mut M, @@ -1681,9 +1711,25 @@ pub mod env { )?) } + /// Computes the SHA2 256-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`]. + #[api_version(0)] + fn hash_sha2_256( + &mut self, + memory: &mut M, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::HashSha256(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, sha2_256, input_ptr, input_len, output_ptr, + )?) + } + /// Computes the KECCAK 256-bit hash on the given input buffer. /// See [`pallet_revive_uapi::HostFn::hash_keccak_256`]. - #[stable] + #[api_version(0)] fn hash_keccak_256( &mut self, memory: &mut M, @@ -1697,53 +1743,36 @@ pub mod env { )?) } - /// Stores the length of the data returned by the last call into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::return_data_size`]. - #[stable] - fn return_data_size(&mut self, memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::ReturnDataSize)?; - Ok(self - .ext - .last_frame_output() - .data - .len() - .try_into() - .expect("usize fits into u64; qed")) - } - - /// Stores data returned by the last call, starting from `offset`, into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::return_data`]. - #[stable] - fn return_data_copy( + /// Computes the BLAKE2 256-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`]. 
+ #[api_version(0)] + fn hash_blake2_256( &mut self, memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - offset: u32, + input_ptr: u32, + input_len: u32, + output_ptr: u32, ) -> Result<(), TrapReason> { - let output = mem::take(self.ext.last_frame_output_mut()); - let result = if offset as usize > output.data.len() { - Err(Error::::OutOfBounds.into()) - } else { - self.write_sandbox_output( - memory, - out_ptr, - out_len_ptr, - &output.data[offset as usize..], - false, - |len| Some(RuntimeCosts::CopyToContract(len)), - ) - }; - *self.ext.last_frame_output_mut() = output; - Ok(result?) - } - - /// Returns the amount of ref_time left. - /// See [`pallet_revive_uapi::HostFn::ref_time_left`]. - #[stable] - fn ref_time_left(&mut self, memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::RefTimeLeft)?; - Ok(self.ext.gas_meter().gas_left().ref_time()) + self.charge_gas(RuntimeCosts::HashBlake256(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, blake2_256, input_ptr, input_len, output_ptr, + )?) + } + + /// Computes the BLAKE2 128-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`]. + #[api_version(0)] + fn hash_blake2_128( + &mut self, + memory: &mut M, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::HashBlake128(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, blake2_128, input_ptr, input_len, output_ptr, + )?) } /// Call into the chain extension provided by the chain if any. @@ -1776,6 +1805,28 @@ pub mod env { ret } + /// Emit a custom debug message. + /// See [`pallet_revive_uapi::HostFn::debug_message`]. 
+ #[api_version(0)] + fn debug_message( + &mut self, + memory: &mut M, + str_ptr: u32, + str_len: u32, + ) -> Result { + let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES); + self.charge_gas(RuntimeCosts::DebugMessage(str_len))?; + if self.ext.append_debug_buffer("") { + let data = memory.read(str_ptr, str_len)?; + if let Some(msg) = core::str::from_utf8(&data).ok() { + self.ext.append_debug_buffer(msg); + } + Ok(ReturnErrorCode::Success) + } else { + Ok(ReturnErrorCode::LoggingDisabled) + } + } + /// Call some dispatchable of the runtime. /// See [`frame_support::traits::call_runtime`]. #[mutating] @@ -1795,68 +1846,86 @@ pub mod env { ) } - /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. - fn caller_is_origin(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsOrigin)?; - Ok(self.ext.caller_is_origin() as u32) - } - - /// Checks whether the caller of the current contract is root. - /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. - fn caller_is_root(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsRoot)?; - Ok(self.ext.caller_is_root() as u32) - } - - /// Clear the value at the given key in the contract storage. - /// See [`pallet_revive_uapi::HostFn::clear_storage`] + /// Execute an XCM program locally, using the contract's address as the origin. + /// See [`pallet_revive_uapi::HostFn::execute_xcm`]. #[mutating] - fn clear_storage( + fn xcm_execute( &mut self, memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.clear_storage(memory, flags, key_ptr, key_len) - } + msg_ptr: u32, + msg_len: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; - /// Checks whether there is a value stored under the given key. 
- /// See [`pallet_revive_uapi::HostFn::contains_storage`] - fn contains_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.contains_storage(memory, flags, key_ptr, key_len) + self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = memory.read_as_unbounded(msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = self.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; + + self.call_dispatchable::( + dispatch_info, + RuntimeCosts::CallXcmExecute, + |runtime| { + let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into(); + let weight_used = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + Ok(Some(weight_used.saturating_add(execute_weight)).into()) + }, + ) } - /// Emit a custom debug message. - /// See [`pallet_revive_uapi::HostFn::debug_message`]. - fn debug_message( + /// Send an XCM program from the contract to the specified destination. + /// See [`pallet_revive_uapi::HostFn::send_xcm`]. 
+ #[mutating] + fn xcm_send( &mut self, memory: &mut M, - str_ptr: u32, - str_len: u32, + dest_ptr: u32, + dest_len: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, ) -> Result { - let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES); - self.charge_gas(RuntimeCosts::DebugMessage(str_len))?; - if self.ext.append_debug_buffer("") { - let data = memory.read(str_ptr, str_len)?; - if let Some(msg) = core::str::from_utf8(&data).ok() { - self.ext.append_debug_buffer(msg); - } - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::LoggingDisabled) + use xcm::{VersionedLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; + let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; + + self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; + + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + self.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + memory.write(output_ptr, &message_id.encode())?; + Ok(ReturnErrorCode::Success) + }, + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("seal0::xcm_send failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ReturnErrorCode::XcmSendFailed) + }, } } /// Recovers the ECDSA public key from the given message hash and signature. /// See [`pallet_revive_uapi::HostFn::ecdsa_recover`]. + #[api_version(0)] fn ecdsa_recover( &mut self, memory: &mut M, @@ -1885,8 +1954,59 @@ pub mod env { } } + /// Verify a sr25519 signature + /// See [`pallet_revive_uapi::HostFn::sr25519_verify`]. 
+ #[api_version(0)] + fn sr25519_verify( + &mut self, + memory: &mut M, + signature_ptr: u32, + pub_key_ptr: u32, + message_len: u32, + message_ptr: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?; + + let mut signature: [u8; 64] = [0; 64]; + memory.read_into_buf(signature_ptr, &mut signature)?; + + let mut pub_key: [u8; 32] = [0; 32]; + memory.read_into_buf(pub_key_ptr, &mut pub_key)?; + + let message: Vec = memory.read(message_ptr, message_len)?; + + if self.ext.sr25519_verify(&signature, &message, &pub_key) { + Ok(ReturnErrorCode::Success) + } else { + Ok(ReturnErrorCode::Sr25519VerifyFailed) + } + } + + /// Replace the contract code at the specified address with new code. + /// See [`pallet_revive_uapi::HostFn::set_code_hash`]. + /// + /// Disabled until the internal implementation takes care of collecting + /// the immutable data of the new code hash. + #[mutating] + fn set_code_hash( + &mut self, + memory: &mut M, + code_hash_ptr: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::SetCodeHash)?; + let code_hash: H256 = memory.read_h256(code_hash_ptr)?; + match self.ext.set_code_hash(code_hash) { + Err(err) => { + let code = Self::err_into_return_code(err)?; + Ok(code) + }, + Ok(()) => Ok(ReturnErrorCode::Success), + } + } + /// Calculates Ethereum address from the ECDSA compressed public key and stores /// See [`pallet_revive_uapi::HostFn::ecdsa_to_eth_address`]. + #[api_version(0)] fn ecdsa_to_eth_address( &mut self, memory: &mut M, @@ -1906,61 +2026,9 @@ pub mod env { } } - /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`]. - fn hash_blake2_128( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake128(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_128, input_ptr, input_len, output_ptr, - )?) 
- } - - /// Computes the BLAKE2 256-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`]. - fn hash_blake2_256( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_256, input_ptr, input_len, output_ptr, - )?) - } - - /// Computes the SHA2 256-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`]. - fn hash_sha2_256( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashSha256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, sha2_256, input_ptr, input_len, output_ptr, - )?) - } - - /// Checks whether a specified address belongs to a contract. - /// See [`pallet_revive_uapi::HostFn::is_contract`]. - fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result { - self.charge_gas(RuntimeCosts::IsContract)?; - let address = memory.read_h160(account_ptr)?; - Ok(self.ext.is_contract(&address) as u32) - } - /// Adds a new delegate dependency to the contract. /// See [`pallet_revive_uapi::HostFn::lock_delegate_dependency`]. + #[api_version(0)] #[mutating] fn lock_delegate_dependency( &mut self, @@ -1973,75 +2041,9 @@ pub mod env { Ok(()) } - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. - fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::MinimumBalance)?; - Ok(self.write_fixed_sandbox_output( - memory, - out_ptr, - &self.ext.minimum_balance().to_little_endian(), - false, - already_charged, - )?) - } - - /// Retrieve the code hash of the currently executing contract. 
- /// See [`pallet_revive_uapi::HostFn::own_code_hash`]. - fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::OwnCodeHash)?; - let code_hash = *self.ext.own_code_hash(); - Ok(self.write_fixed_sandbox_output( - memory, - out_ptr, - code_hash.as_bytes(), - false, - already_charged, - )?) - } - - /// Replace the contract code at the specified address with new code. - /// See [`pallet_revive_uapi::HostFn::set_code_hash`]. - /// - /// Disabled until the internal implementation takes care of collecting - /// the immutable data of the new code hash. - #[mutating] - fn set_code_hash(&mut self, memory: &mut M, code_hash_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::SetCodeHash)?; - let code_hash: H256 = memory.read_h256(code_hash_ptr)?; - self.ext.set_code_hash(code_hash)?; - Ok(()) - } - - /// Verify a sr25519 signature - /// See [`pallet_revive_uapi::HostFn::sr25519_verify`]. - fn sr25519_verify( - &mut self, - memory: &mut M, - signature_ptr: u32, - pub_key_ptr: u32, - message_len: u32, - message_ptr: u32, - ) -> Result { - self.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?; - - let mut signature: [u8; 64] = [0; 64]; - memory.read_into_buf(signature_ptr, &mut signature)?; - - let mut pub_key: [u8; 32] = [0; 32]; - memory.read_into_buf(pub_key_ptr, &mut pub_key)?; - - let message: Vec = memory.read(message_ptr, message_len)?; - - if self.ext.sr25519_verify(&signature, &message, &pub_key) { - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::Sr25519VerifyFailed) - } - } - /// Removes the delegate dependency from the contract. /// see [`pallet_revive_uapi::HostFn::unlock_delegate_dependency`]. + #[api_version(0)] #[mutating] fn unlock_delegate_dependency( &mut self, @@ -2054,122 +2056,43 @@ pub mod env { Ok(()) } - /// Retrieve and remove the value under the given key from storage. 
- /// See [`pallet_revive_uapi::HostFn::take_storage`] - #[mutating] - fn take_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { - self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) - } - - /// Remove the calling account and transfer remaining **free** balance. - /// See [`pallet_revive_uapi::HostFn::terminate`]. - #[mutating] - fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> { - self.terminate(memory, beneficiary_ptr) - } - - /// Stores the amount of weight left into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::weight_left`]. - fn weight_left( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::WeightLeft)?; - let gas_left = &self.ext.gas_meter().gas_left().encode(); - Ok(self.write_sandbox_output( + /// Stores the length of the data returned by the last call into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data_size`]. + #[api_version(0)] + fn return_data_size(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - gas_left, + &U256::from(self.ext.last_frame_output().data.len()).to_little_endian(), false, - already_charged, + |len| Some(RuntimeCosts::CopyToContract(len)), )?) } - /// Execute an XCM program locally, using the contract's address as the origin. - /// See [`pallet_revive_uapi::HostFn::execute_xcm`]. 
- #[mutating] - fn xcm_execute( - &mut self, - memory: &mut M, - msg_ptr: u32, - msg_len: u32, - ) -> Result { - use frame_support::dispatch::DispatchInfo; - use xcm::VersionedXcm; - use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm> = memory.read_as_unbounded(msg_ptr, msg_len)?; - - let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); - let weight = self.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; - - self.call_dispatchable::( - dispatch_info, - RuntimeCosts::CallXcmExecute, - |runtime| { - let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into(); - let weight_used = <::Xcm>::execute( - origin, - Box::new(message), - weight.saturating_sub(execute_weight), - )?; - - Ok(Some(weight_used.saturating_add(execute_weight)).into()) - }, - ) - } - - /// Send an XCM program from the contract to the specified destination. - /// See [`pallet_revive_uapi::HostFn::send_xcm`]. - #[mutating] - fn xcm_send( + /// Stores data returned by the last call, starting from `offset`, into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data`]. 
+ #[api_version(0)] + fn return_data_copy( &mut self, memory: &mut M, - dest_ptr: u32, - dest_len: u32, - msg_ptr: u32, - msg_len: u32, - output_ptr: u32, - ) -> Result { - use xcm::{VersionedLocation, VersionedXcm}; - use xcm_builder::{SendController, SendControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; - let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; - - let weight = <::Xcm as SendController<_>>::WeightInfo::send(); - self.charge_gas(RuntimeCosts::CallRuntime(weight))?; - let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); - - match <::Xcm>::send(origin, dest.into(), message.into()) { - Ok(message_id) => { - memory.write(output_ptr, &message_id.encode())?; - Ok(ReturnErrorCode::Success) - }, - Err(e) => { - if self.ext.append_debug_buffer("") { - self.ext.append_debug_buffer("seal0::xcm_send failed with: "); - self.ext.append_debug_buffer(e.into()); - }; - Ok(ReturnErrorCode::XcmSendFailed) - }, - } + out_ptr: u32, + out_len_ptr: u32, + offset: u32, + ) -> Result<(), TrapReason> { + let output = mem::take(self.ext.last_frame_output_mut()); + let result = if offset as usize > output.data.len() { + Err(Error::::OutOfBounds.into()) + } else { + self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &output.data[offset as usize..], + false, + |len| Some(RuntimeCosts::CopyToContract(len)), + ) + }; + *self.ext.last_frame_output_mut() = output; + Ok(result?) } } diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index e35ba5ca0766..3c6a0be6ee75 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -18,28 +18,26 @@ //! Autogenerated weights for `pallet_revive` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-12-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `19e0eeaa3bc2`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate-node // benchmark // pallet -// --extrinsic=* -// --chain=dev -// --pallet=pallet_revive -// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 -// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/revive/src/weights.rs -// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --template=substrate/.maintain/frame-weight-template.hbs -// --no-storage-info -// --no-min-squares -// --no-median-slopes +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_revive +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/revive/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -74,25 +72,17 @@ pub trait WeightInfo { fn seal_caller_is_root() -> Weight; fn seal_address() -> Weight; fn seal_weight_left() -> Weight; - fn seal_ref_time_left() -> Weight; fn seal_balance() -> Weight; fn seal_balance_of() -> Weight; fn seal_get_immutable_data(n: u32, ) -> Weight; fn seal_set_immutable_data(n: u32, ) -> Weight; fn seal_value_transferred() -> Weight; fn seal_minimum_balance() -> Weight; - fn seal_return_data_size() -> Weight; - fn seal_call_data_size() -> Weight; - fn seal_gas_limit() -> Weight; - fn seal_gas_price() -> Weight; - fn seal_base_fee() -> Weight; fn 
seal_block_number() -> Weight; fn seal_block_hash() -> Weight; fn seal_now() -> Weight; fn seal_weight_to_fee() -> Weight; - fn seal_copy_to_contract(n: u32, ) -> Weight; - fn seal_call_data_load() -> Weight; - fn seal_call_data_copy(n: u32, ) -> Weight; + fn seal_input(n: u32, ) -> Weight; fn seal_return(n: u32, ) -> Weight; fn seal_terminate(n: u32, ) -> Weight; fn seal_deposit_event(t: u32, n: u32, ) -> Weight; @@ -141,8 +131,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 2_859_000 picoseconds. - Weight::from_parts(3_007_000, 1594) + // Minimum execution time: 2_649_000 picoseconds. + Weight::from_parts(2_726_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -152,10 +142,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `425 + k * (69 ±0)` // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 15_640_000 picoseconds. - Weight::from_parts(1_609_026, 415) - // Standard Error: 1_359 - .saturating_add(Weight::from_parts(1_204_420, 0).saturating_mul(k.into())) + // Minimum execution time: 12_756_000 picoseconds. + Weight::from_parts(13_112_000, 415) + // Standard Error: 988 + .saturating_add(Weight::from_parts(1_131_927, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -177,10 +167,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 262144]`. fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1463` - // Estimated: `7403` - // Minimum execution time: 89_437_000 picoseconds. - Weight::from_parts(94_285_182, 7403) + // Measured: `1465` + // Estimated: `7405` + // Minimum execution time: 86_553_000 picoseconds. 
+ Weight::from_parts(89_689_079, 7405) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -200,16 +190,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. /// The range of component `i` is `[0, 262144]`. - fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `6327` - // Minimum execution time: 187_904_000 picoseconds. - Weight::from_parts(153_252_081, 6327) + // Measured: `416` + // Estimated: `6333` + // Minimum execution time: 180_721_000 picoseconds. + Weight::from_parts(155_866_981, 6333) // Standard Error: 11 - .saturating_add(Weight::from_parts(49, 0).saturating_mul(c.into())) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_528, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_514, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -231,11 +219,11 @@ impl WeightInfo for SubstrateWeight { fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1296` - // Estimated: `4758` - // Minimum execution time: 154_656_000 picoseconds. - Weight::from_parts(139_308_398, 4758) + // Estimated: `4741` + // Minimum execution time: 151_590_000 picoseconds. 
+ Weight::from_parts(128_110_988, 4741) // Standard Error: 16 - .saturating_add(Weight::from_parts(4_421, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_453, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,10 +241,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1463` - // Estimated: `7403` - // Minimum execution time: 138_815_000 picoseconds. - Weight::from_parts(149_067_000, 7403) + // Measured: `1465` + // Estimated: `7405` + // Minimum execution time: 136_371_000 picoseconds. + Weight::from_parts(140_508_000, 7405) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -271,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 49_978_000 picoseconds. - Weight::from_parts(51_789_325, 3574) + // Minimum execution time: 51_255_000 picoseconds. + Weight::from_parts(52_668_809, 3574) // Standard Error: 0 .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) @@ -288,8 +276,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 43_833_000 picoseconds. - Weight::from_parts(44_660_000, 3750) + // Minimum execution time: 41_664_000 picoseconds. + Weight::from_parts(42_981_000, 3750) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -301,8 +289,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `529` // Estimated: `6469` - // Minimum execution time: 26_717_000 picoseconds. 
- Weight::from_parts(28_566_000, 6469) + // Minimum execution time: 27_020_000 picoseconds. + Weight::from_parts(27_973_000, 6469) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -314,8 +302,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 39_401_000 picoseconds. - Weight::from_parts(40_542_000, 3574) + // Minimum execution time: 42_342_000 picoseconds. + Weight::from_parts(43_210_000, 3574) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -327,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `56` // Estimated: `3521` - // Minimum execution time: 31_570_000 picoseconds. - Weight::from_parts(32_302_000, 3521) + // Minimum execution time: 31_881_000 picoseconds. + Weight::from_parts(32_340_000, 3521) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -340,8 +328,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 13_607_000 picoseconds. - Weight::from_parts(13_903_000, 3610) + // Minimum execution time: 11_087_000 picoseconds. + Weight::from_parts(11_416_000, 3610) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -349,24 +337,24 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_400_000 picoseconds. - Weight::from_parts(8_388_251, 0) - // Standard Error: 283 - .saturating_add(Weight::from_parts(165_630, 0).saturating_mul(r.into())) + // Minimum execution time: 6_403_000 picoseconds. 
+ Weight::from_parts(7_751_101, 0) + // Standard Error: 99 + .saturating_add(Weight::from_parts(179_467, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 275_000 picoseconds. - Weight::from_parts(305_000, 0) + // Minimum execution time: 272_000 picoseconds. + Weight::from_parts(306_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. - Weight::from_parts(265_000, 0) + // Minimum execution time: 226_000 picoseconds. + Weight::from_parts(261_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -374,8 +362,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `306` // Estimated: `3771` - // Minimum execution time: 10_004_000 picoseconds. - Weight::from_parts(10_336_000, 3771) + // Minimum execution time: 6_727_000 picoseconds. + Weight::from_parts(7_122_000, 3771) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) @@ -384,16 +372,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `403` // Estimated: `3868` - // Minimum execution time: 11_054_000 picoseconds. - Weight::from_parts(11_651_000, 3868) + // Minimum execution time: 7_542_000 picoseconds. + Weight::from_parts(7_846_000, 3868) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 252_000 picoseconds. - Weight::from_parts(305_000, 0) + // Minimum execution time: 243_000 picoseconds. 
+ Weight::from_parts(275_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -403,51 +391,44 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `473` // Estimated: `3938` - // Minimum execution time: 14_461_000 picoseconds. - Weight::from_parts(15_049_000, 3938) + // Minimum execution time: 11_948_000 picoseconds. + Weight::from_parts(12_406_000, 3938) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 312_000 picoseconds. - Weight::from_parts(338_000, 0) + // Minimum execution time: 329_000 picoseconds. + Weight::from_parts(362_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 243_000 picoseconds. - Weight::from_parts(299_000, 0) + // Minimum execution time: 276_000 picoseconds. + Weight::from_parts(303_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(271_000, 0) + // Minimum execution time: 251_000 picoseconds. + Weight::from_parts(286_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 683_000 picoseconds. - Weight::from_parts(732_000, 0) - } - fn seal_ref_time_left() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 226_000 picoseconds. - Weight::from_parts(273_000, 0) + // Minimum execution time: 611_000 picoseconds. 
+ Weight::from_parts(669_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `102` + // Measured: `103` // Estimated: `0` - // Minimum execution time: 4_626_000 picoseconds. - Weight::from_parts(4_842_000, 0) + // Minimum execution time: 4_439_000 picoseconds. + Weight::from_parts(4_572_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -457,8 +438,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `264` // Estimated: `3729` - // Minimum execution time: 12_309_000 picoseconds. - Weight::from_parts(12_653_000, 3729) + // Minimum execution time: 9_336_000 picoseconds. + Weight::from_parts(9_622_000, 3729) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -468,10 +449,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `238 + n * (1 ±0)` // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 5_838_000 picoseconds. - Weight::from_parts(9_570_778, 3703) - // Standard Error: 19 - .saturating_add(Weight::from_parts(721, 0).saturating_mul(n.into())) + // Minimum execution time: 5_660_000 picoseconds. + Weight::from_parts(6_291_437, 3703) + // Standard Error: 4 + .saturating_add(Weight::from_parts(741, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -482,67 +463,32 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_910_000 picoseconds. - Weight::from_parts(2_205_396, 0) + // Minimum execution time: 1_909_000 picoseconds. 
+ Weight::from_parts(2_154_705, 0) // Standard Error: 2 - .saturating_add(Weight::from_parts(538, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(643, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. - Weight::from_parts(274_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(283_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(279_000, 0) - } - fn seal_return_data_size() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(267_000, 0) - } - fn seal_call_data_size() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 218_000 picoseconds. - Weight::from_parts(267_000, 0) - } - fn seal_gas_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 225_000 picoseconds. - Weight::from_parts(280_000, 0) - } - fn seal_gas_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 274_000 picoseconds. - Weight::from_parts(323_000, 0) - } - fn seal_base_fee() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(294_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. 
- Weight::from_parts(274_000, 0) + // Minimum execution time: 218_000 picoseconds. + Weight::from_parts(281_000, 0) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) @@ -550,50 +496,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `30` // Estimated: `3495` - // Minimum execution time: 3_430_000 picoseconds. - Weight::from_parts(3_692_000, 3495) + // Minimum execution time: 3_373_000 picoseconds. + Weight::from_parts(3_610_000, 3495) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 241_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 247_000 picoseconds. + Weight::from_parts(299_000, 0) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_355_000 picoseconds. - Weight::from_parts(1_493_000, 0) + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 5_523_000 picoseconds. + Weight::from_parts(5_757_000, 1552) + .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 262140]`. - fn seal_copy_to_contract(n: u32, ) -> Weight { + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 348_000 picoseconds. - Weight::from_parts(1_004_890, 0) + // Minimum execution time: 450_000 picoseconds. 
+ Weight::from_parts(584_658, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) - } - fn seal_call_data_load() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 222_000 picoseconds. - Weight::from_parts(256_000, 0) - } - /// The range of component `n` is `[0, 262144]`. - fn seal_call_data_copy(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 240_000 picoseconds. - Weight::from_parts(330_609, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { @@ -601,9 +533,9 @@ impl WeightInfo for SubstrateWeight { // Measured: `0` // Estimated: `0` // Minimum execution time: 232_000 picoseconds. - Weight::from_parts(264_000, 0) + Weight::from_parts(611_960, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(294, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -618,12 +550,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `322 + n * (88 ±0)` + // Measured: `321 + n * (88 ±0)` // Estimated: `3787 + n * (2563 ±0)` - // Minimum execution time: 21_920_000 picoseconds. - Weight::from_parts(21_725_868, 3787) - // Standard Error: 11_165 - .saturating_add(Weight::from_parts(4_317_986, 0).saturating_mul(n.into())) + // Minimum execution time: 19_158_000 picoseconds. 
+ Weight::from_parts(20_900_189, 3787) + // Standard Error: 9_648 + .saturating_add(Weight::from_parts(4_239_910, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -631,56 +563,56 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2563).saturating_mul(n.into())) } /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_140_000 picoseconds. - Weight::from_parts(4_259_301, 0) - // Standard Error: 3_362 - .saturating_add(Weight::from_parts(194_546, 0).saturating_mul(t.into())) - // Standard Error: 34 - .saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into())) + // Minimum execution time: 4_097_000 picoseconds. + Weight::from_parts(3_956_608, 0) + // Standard Error: 2_678 + .saturating_add(Weight::from_parts(178_555, 0).saturating_mul(t.into())) + // Standard Error: 23 + .saturating_add(Weight::from_parts(1_127, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(306_527, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into())) + // Minimum execution time: 277_000 picoseconds. 
+ Weight::from_parts(1_044_051, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(794, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 10_747_000 picoseconds. - Weight::from_parts(11_276_000, 680) + // Measured: `744` + // Estimated: `744` + // Minimum execution time: 7_745_000 picoseconds. + Weight::from_parts(8_370_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 42_076_000 picoseconds. - Weight::from_parts(43_381_000, 10690) + // Measured: `10754` + // Estimated: `10754` + // Minimum execution time: 43_559_000 picoseconds. + Weight::from_parts(44_310_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 11_703_000 picoseconds. - Weight::from_parts(12_308_000, 680) + // Measured: `744` + // Estimated: `744` + // Minimum execution time: 8_866_000 picoseconds. 
+ Weight::from_parts(9_072_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -688,85 +620,85 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 43_460_000 picoseconds. - Weight::from_parts(45_165_000, 10690) + // Measured: `10754` + // Estimated: `10754` + // Minimum execution time: 44_481_000 picoseconds. + Weight::from_parts(45_157_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. - /// The range of component `o` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. + /// The range of component `o` is `[0, 512]`. fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_087_000 picoseconds. - Weight::from_parts(11_787_486, 247) - // Standard Error: 179 - .saturating_add(Weight::from_parts(976, 0).saturating_mul(n.into())) - // Standard Error: 179 - .saturating_add(Weight::from_parts(3_151, 0).saturating_mul(o.into())) + // Minimum execution time: 9_130_000 picoseconds. 
+ Weight::from_parts(9_709_648, 247) + // Standard Error: 40 + .saturating_add(Weight::from_parts(435, 0).saturating_mul(n.into())) + // Standard Error: 40 + .saturating_add(Weight::from_parts(384, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_611_000 picoseconds. - Weight::from_parts(11_791_390, 247) - // Standard Error: 308 - .saturating_add(Weight::from_parts(3_943, 0).saturating_mul(n.into())) + // Minimum execution time: 8_753_000 picoseconds. + Weight::from_parts(9_558_399, 247) + // Standard Error: 56 + .saturating_add(Weight::from_parts(483, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_389_000 picoseconds. - Weight::from_parts(11_625_480, 247) - // Standard Error: 315 - .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(n.into())) + // Minimum execution time: 8_328_000 picoseconds. 
+ Weight::from_parts(9_120_157, 247) + // Standard Error: 58 + .saturating_add(Weight::from_parts(1_637, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 7_947_000 picoseconds. - Weight::from_parts(10_970_587, 247) - // Standard Error: 310 - .saturating_add(Weight::from_parts(3_675, 0).saturating_mul(n.into())) + // Minimum execution time: 7_977_000 picoseconds. + Weight::from_parts(8_582_869, 247) + // Standard Error: 52 + .saturating_add(Weight::from_parts(854, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_071_000 picoseconds. - Weight::from_parts(12_525_027, 247) - // Standard Error: 328 - .saturating_add(Weight::from_parts(4_427, 0).saturating_mul(n.into())) + // Minimum execution time: 9_193_000 picoseconds. 
+ Weight::from_parts(10_112_966, 247) + // Standard Error: 63 + .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -775,89 +707,87 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_487_000 picoseconds. - Weight::from_parts(1_611_000, 0) + // Minimum execution time: 1_398_000 picoseconds. + Weight::from_parts(1_490_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_852_000 picoseconds. - Weight::from_parts(1_982_000, 0) + // Minimum execution time: 1_762_000 picoseconds. + Weight::from_parts(1_926_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_467_000 picoseconds. - Weight::from_parts(1_529_000, 0) + // Minimum execution time: 1_413_000 picoseconds. + Weight::from_parts(1_494_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_630_000 picoseconds. - Weight::from_parts(1_712_000, 0) + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_659_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_188_000 picoseconds. - Weight::from_parts(1_268_000, 0) + // Minimum execution time: 1_010_000 picoseconds. + Weight::from_parts(1_117_000, 0) } - /// The range of component `n` is `[0, 448]`. - /// The range of component `o` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. + /// The range of component `o` is `[0, 512]`. 
fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_197_000 picoseconds. - Weight::from_parts(2_464_654, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(296, 0).saturating_mul(n.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(342, 0).saturating_mul(o.into())) + // Minimum execution time: 2_194_000 picoseconds. + Weight::from_parts(2_290_633, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(341, 0).saturating_mul(n.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(o.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_005_000 picoseconds. - Weight::from_parts(2_381_053, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) + // Minimum execution time: 1_896_000 picoseconds. + Weight::from_parts(2_254_323, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(439, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_853_000 picoseconds. - Weight::from_parts(2_082_772, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) + // Minimum execution time: 1_800_000 picoseconds. + Weight::from_parts(1_948_552, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(360, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. 
fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_711_000 picoseconds. - Weight::from_parts(1_899_649, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + // Minimum execution time: 1_615_000 picoseconds. + Weight::from_parts(1_812_731, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(177, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. - fn seal_take_transient_storage(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 512]`. + fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_684_364, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(56, 0).saturating_mul(n.into())) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_669_757, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -867,38 +797,31 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1292 + t * (203 ±0)` - // Estimated: `4757 + t * (2480 ±0)` - // Minimum execution time: 40_031_000 picoseconds. 
- Weight::from_parts(41_527_691, 4757) - // Standard Error: 50_351 - .saturating_add(Weight::from_parts(1_112_950, 0).saturating_mul(t.into())) + // Measured: `1292 + t * (103 ±0)` + // Estimated: `4757 + t * (103 ±0)` + // Minimum execution time: 37_280_000 picoseconds. + Weight::from_parts(41_639_379, 4757) // Standard Error: 0 - .saturating_add(Weight::from_parts(1, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2480).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 103).saturating_mul(t.into())) } - /// Storage: `Revive::ContractInfoOf` (r:1 w:0) - /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1237` - // Estimated: `4702` - // Minimum execution time: 35_759_000 picoseconds. - Weight::from_parts(37_086_000, 4702) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `1064` + // Estimated: `4529` + // Minimum execution time: 27_564_000 picoseconds. + Weight::from_parts(28_809_000, 4529) + .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -911,12 +834,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. 
fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1271` - // Estimated: `4710` - // Minimum execution time: 116_485_000 picoseconds. - Weight::from_parts(108_907_717, 4710) - // Standard Error: 12 - .saturating_add(Weight::from_parts(4_125, 0).saturating_mul(i.into())) + // Measured: `1273` + // Estimated: `4732` + // Minimum execution time: 115_581_000 picoseconds. + Weight::from_parts(105_196_218, 4732) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_134, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -925,64 +848,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 651_000 picoseconds. - Weight::from_parts(3_867_609, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(3_425_431, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_461, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_090_000 picoseconds. - Weight::from_parts(5_338_460, 0) + // Minimum execution time: 1_113_000 picoseconds. + Weight::from_parts(4_611_854, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_601, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_652, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 717_000 picoseconds. 
- Weight::from_parts(2_629_461, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(3_872_321, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_584, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(4_807_814, 0) + // Minimum execution time: 559_000 picoseconds. + Weight::from_parts(4_721_584, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_509, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_570, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 42_829_000 picoseconds. - Weight::from_parts(24_650_992, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(5_212, 0).saturating_mul(n.into())) + // Minimum execution time: 47_467_000 picoseconds. + Weight::from_parts(36_639_352, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_216, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_902_000 picoseconds. - Weight::from_parts(48_072_000, 0) + // Minimum execution time: 48_106_000 picoseconds. + Weight::from_parts(49_352_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_713_000 picoseconds. - Weight::from_parts(12_847_000, 0) + // Minimum execution time: 12_616_000 picoseconds. 
+ Weight::from_parts(12_796_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -990,8 +913,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `300` // Estimated: `3765` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_419_000, 3765) + // Minimum execution time: 14_055_000 picoseconds. + Weight::from_parts(14_526_000, 3765) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -999,10 +922,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `3803` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(14_209_000, 3803) + // Measured: `337` + // Estimated: `3802` + // Minimum execution time: 10_338_000 picoseconds. + Weight::from_parts(10_677_000, 3802) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1010,10 +933,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `338` + // Measured: `337` // Estimated: `3561` - // Minimum execution time: 12_341_000 picoseconds. - Weight::from_parts(13_011_000, 3561) + // Minimum execution time: 8_740_000 picoseconds. + Weight::from_parts(9_329_000, 3561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1022,10 +945,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_899_000 picoseconds. 
- Weight::from_parts(10_489_171, 0) - // Standard Error: 104 - .saturating_add(Weight::from_parts(73_814, 0).saturating_mul(r.into())) + // Minimum execution time: 7_846_000 picoseconds. + Weight::from_parts(9_717_991, 0) + // Standard Error: 49 + .saturating_add(Weight::from_parts(72_062, 0).saturating_mul(r.into())) } } @@ -1037,8 +960,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 2_859_000 picoseconds. - Weight::from_parts(3_007_000, 1594) + // Minimum execution time: 2_649_000 picoseconds. + Weight::from_parts(2_726_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1048,10 +971,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `425 + k * (69 ±0)` // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 15_640_000 picoseconds. - Weight::from_parts(1_609_026, 415) - // Standard Error: 1_359 - .saturating_add(Weight::from_parts(1_204_420, 0).saturating_mul(k.into())) + // Minimum execution time: 12_756_000 picoseconds. + Weight::from_parts(13_112_000, 415) + // Standard Error: 988 + .saturating_add(Weight::from_parts(1_131_927, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1073,10 +996,10 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 262144]`. fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1463` - // Estimated: `7403` - // Minimum execution time: 89_437_000 picoseconds. - Weight::from_parts(94_285_182, 7403) + // Measured: `1465` + // Estimated: `7405` + // Minimum execution time: 86_553_000 picoseconds. 
+ Weight::from_parts(89_689_079, 7405) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1096,16 +1019,14 @@ impl WeightInfo for () { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. /// The range of component `i` is `[0, 262144]`. - fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `6327` - // Minimum execution time: 187_904_000 picoseconds. - Weight::from_parts(153_252_081, 6327) - // Standard Error: 11 - .saturating_add(Weight::from_parts(49, 0).saturating_mul(c.into())) + // Measured: `416` + // Estimated: `6333` + // Minimum execution time: 180_721_000 picoseconds. + Weight::from_parts(155_866_981, 6333) // Standard Error: 11 - .saturating_add(Weight::from_parts(4_528, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_514, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1127,11 +1048,11 @@ impl WeightInfo for () { fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1296` - // Estimated: `4758` - // Minimum execution time: 154_656_000 picoseconds. - Weight::from_parts(139_308_398, 4758) + // Estimated: `4741` + // Minimum execution time: 151_590_000 picoseconds. 
+ Weight::from_parts(128_110_988, 4741) // Standard Error: 16 - .saturating_add(Weight::from_parts(4_421, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_453, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1149,10 +1070,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1463` - // Estimated: `7403` - // Minimum execution time: 138_815_000 picoseconds. - Weight::from_parts(149_067_000, 7403) + // Measured: `1465` + // Estimated: `7405` + // Minimum execution time: 136_371_000 picoseconds. + Weight::from_parts(140_508_000, 7405) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1167,8 +1088,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 49_978_000 picoseconds. - Weight::from_parts(51_789_325, 3574) + // Minimum execution time: 51_255_000 picoseconds. + Weight::from_parts(52_668_809, 3574) // Standard Error: 0 .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) @@ -1184,8 +1105,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 43_833_000 picoseconds. - Weight::from_parts(44_660_000, 3750) + // Minimum execution time: 41_664_000 picoseconds. + Weight::from_parts(42_981_000, 3750) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1197,8 +1118,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `529` // Estimated: `6469` - // Minimum execution time: 26_717_000 picoseconds. 
- Weight::from_parts(28_566_000, 6469) + // Minimum execution time: 27_020_000 picoseconds. + Weight::from_parts(27_973_000, 6469) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1210,8 +1131,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 39_401_000 picoseconds. - Weight::from_parts(40_542_000, 3574) + // Minimum execution time: 42_342_000 picoseconds. + Weight::from_parts(43_210_000, 3574) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1223,8 +1144,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `56` // Estimated: `3521` - // Minimum execution time: 31_570_000 picoseconds. - Weight::from_parts(32_302_000, 3521) + // Minimum execution time: 31_881_000 picoseconds. + Weight::from_parts(32_340_000, 3521) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1236,8 +1157,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 13_607_000 picoseconds. - Weight::from_parts(13_903_000, 3610) + // Minimum execution time: 11_087_000 picoseconds. + Weight::from_parts(11_416_000, 3610) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1245,24 +1166,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_400_000 picoseconds. - Weight::from_parts(8_388_251, 0) - // Standard Error: 283 - .saturating_add(Weight::from_parts(165_630, 0).saturating_mul(r.into())) + // Minimum execution time: 6_403_000 picoseconds. 
+ Weight::from_parts(7_751_101, 0) + // Standard Error: 99 + .saturating_add(Weight::from_parts(179_467, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 275_000 picoseconds. - Weight::from_parts(305_000, 0) + // Minimum execution time: 272_000 picoseconds. + Weight::from_parts(306_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. - Weight::from_parts(265_000, 0) + // Minimum execution time: 226_000 picoseconds. + Weight::from_parts(261_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1270,8 +1191,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `306` // Estimated: `3771` - // Minimum execution time: 10_004_000 picoseconds. - Weight::from_parts(10_336_000, 3771) + // Minimum execution time: 6_727_000 picoseconds. + Weight::from_parts(7_122_000, 3771) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) @@ -1280,16 +1201,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `403` // Estimated: `3868` - // Minimum execution time: 11_054_000 picoseconds. - Weight::from_parts(11_651_000, 3868) + // Minimum execution time: 7_542_000 picoseconds. + Weight::from_parts(7_846_000, 3868) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 252_000 picoseconds. - Weight::from_parts(305_000, 0) + // Minimum execution time: 243_000 picoseconds. 
+ Weight::from_parts(275_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1299,51 +1220,44 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `473` // Estimated: `3938` - // Minimum execution time: 14_461_000 picoseconds. - Weight::from_parts(15_049_000, 3938) + // Minimum execution time: 11_948_000 picoseconds. + Weight::from_parts(12_406_000, 3938) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 312_000 picoseconds. - Weight::from_parts(338_000, 0) + // Minimum execution time: 329_000 picoseconds. + Weight::from_parts(362_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 243_000 picoseconds. - Weight::from_parts(299_000, 0) + // Minimum execution time: 276_000 picoseconds. + Weight::from_parts(303_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(271_000, 0) + // Minimum execution time: 251_000 picoseconds. + Weight::from_parts(286_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 683_000 picoseconds. - Weight::from_parts(732_000, 0) - } - fn seal_ref_time_left() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 226_000 picoseconds. - Weight::from_parts(273_000, 0) + // Minimum execution time: 611_000 picoseconds. 
+ Weight::from_parts(669_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `102` + // Measured: `103` // Estimated: `0` - // Minimum execution time: 4_626_000 picoseconds. - Weight::from_parts(4_842_000, 0) + // Minimum execution time: 4_439_000 picoseconds. + Weight::from_parts(4_572_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1353,8 +1267,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `264` // Estimated: `3729` - // Minimum execution time: 12_309_000 picoseconds. - Weight::from_parts(12_653_000, 3729) + // Minimum execution time: 9_336_000 picoseconds. + Weight::from_parts(9_622_000, 3729) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -1364,10 +1278,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `238 + n * (1 ±0)` // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 5_838_000 picoseconds. - Weight::from_parts(9_570_778, 3703) - // Standard Error: 19 - .saturating_add(Weight::from_parts(721, 0).saturating_mul(n.into())) + // Minimum execution time: 5_660_000 picoseconds. + Weight::from_parts(6_291_437, 3703) + // Standard Error: 4 + .saturating_add(Weight::from_parts(741, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1378,67 +1292,32 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_910_000 picoseconds. - Weight::from_parts(2_205_396, 0) + // Minimum execution time: 1_909_000 picoseconds. 
+ Weight::from_parts(2_154_705, 0) // Standard Error: 2 - .saturating_add(Weight::from_parts(538, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(643, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. - Weight::from_parts(274_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(283_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(279_000, 0) - } - fn seal_return_data_size() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(267_000, 0) - } - fn seal_call_data_size() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 218_000 picoseconds. - Weight::from_parts(267_000, 0) - } - fn seal_gas_limit() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 225_000 picoseconds. - Weight::from_parts(280_000, 0) - } - fn seal_gas_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 274_000 picoseconds. - Weight::from_parts(323_000, 0) - } - fn seal_base_fee() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 239_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 263_000 picoseconds. + Weight::from_parts(294_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 224_000 picoseconds. 
- Weight::from_parts(274_000, 0) + // Minimum execution time: 218_000 picoseconds. + Weight::from_parts(281_000, 0) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) @@ -1446,50 +1325,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `30` // Estimated: `3495` - // Minimum execution time: 3_430_000 picoseconds. - Weight::from_parts(3_692_000, 3495) + // Minimum execution time: 3_373_000 picoseconds. + Weight::from_parts(3_610_000, 3495) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 241_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 247_000 picoseconds. + Weight::from_parts(299_000, 0) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_355_000 picoseconds. - Weight::from_parts(1_493_000, 0) + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 5_523_000 picoseconds. + Weight::from_parts(5_757_000, 1552) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 262140]`. - fn seal_copy_to_contract(n: u32, ) -> Weight { + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 348_000 picoseconds. - Weight::from_parts(1_004_890, 0) + // Minimum execution time: 450_000 picoseconds. 
+ Weight::from_parts(584_658, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) - } - fn seal_call_data_load() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 222_000 picoseconds. - Weight::from_parts(256_000, 0) - } - /// The range of component `n` is `[0, 262144]`. - fn seal_call_data_copy(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 240_000 picoseconds. - Weight::from_parts(330_609, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { @@ -1497,9 +1362,9 @@ impl WeightInfo for () { // Measured: `0` // Estimated: `0` // Minimum execution time: 232_000 picoseconds. - Weight::from_parts(264_000, 0) + Weight::from_parts(611_960, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(294, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1514,12 +1379,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `322 + n * (88 ±0)` + // Measured: `321 + n * (88 ±0)` // Estimated: `3787 + n * (2563 ±0)` - // Minimum execution time: 21_920_000 picoseconds. - Weight::from_parts(21_725_868, 3787) - // Standard Error: 11_165 - .saturating_add(Weight::from_parts(4_317_986, 0).saturating_mul(n.into())) + // Minimum execution time: 19_158_000 picoseconds. 
+ Weight::from_parts(20_900_189, 3787) + // Standard Error: 9_648 + .saturating_add(Weight::from_parts(4_239_910, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1527,56 +1392,56 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 2563).saturating_mul(n.into())) } /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_140_000 picoseconds. - Weight::from_parts(4_259_301, 0) - // Standard Error: 3_362 - .saturating_add(Weight::from_parts(194_546, 0).saturating_mul(t.into())) - // Standard Error: 34 - .saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into())) + // Minimum execution time: 4_097_000 picoseconds. + Weight::from_parts(3_956_608, 0) + // Standard Error: 2_678 + .saturating_add(Weight::from_parts(178_555, 0).saturating_mul(t.into())) + // Standard Error: 23 + .saturating_add(Weight::from_parts(1_127, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 340_000 picoseconds. - Weight::from_parts(306_527, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into())) + // Minimum execution time: 277_000 picoseconds. 
+ Weight::from_parts(1_044_051, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(794, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 10_747_000 picoseconds. - Weight::from_parts(11_276_000, 680) + // Measured: `744` + // Estimated: `744` + // Minimum execution time: 7_745_000 picoseconds. + Weight::from_parts(8_370_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 42_076_000 picoseconds. - Weight::from_parts(43_381_000, 10690) + // Measured: `10754` + // Estimated: `10754` + // Minimum execution time: 43_559_000 picoseconds. + Weight::from_parts(44_310_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 11_703_000 picoseconds. - Weight::from_parts(12_308_000, 680) + // Measured: `744` + // Estimated: `744` + // Minimum execution time: 8_866_000 picoseconds. 
+ Weight::from_parts(9_072_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1584,85 +1449,85 @@ impl WeightInfo for () { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 43_460_000 picoseconds. - Weight::from_parts(45_165_000, 10690) + // Measured: `10754` + // Estimated: `10754` + // Minimum execution time: 44_481_000 picoseconds. + Weight::from_parts(45_157_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. - /// The range of component `o` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. + /// The range of component `o` is `[0, 512]`. fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_087_000 picoseconds. - Weight::from_parts(11_787_486, 247) - // Standard Error: 179 - .saturating_add(Weight::from_parts(976, 0).saturating_mul(n.into())) - // Standard Error: 179 - .saturating_add(Weight::from_parts(3_151, 0).saturating_mul(o.into())) + // Minimum execution time: 9_130_000 picoseconds. 
+ Weight::from_parts(9_709_648, 247) + // Standard Error: 40 + .saturating_add(Weight::from_parts(435, 0).saturating_mul(n.into())) + // Standard Error: 40 + .saturating_add(Weight::from_parts(384, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_611_000 picoseconds. - Weight::from_parts(11_791_390, 247) - // Standard Error: 308 - .saturating_add(Weight::from_parts(3_943, 0).saturating_mul(n.into())) + // Minimum execution time: 8_753_000 picoseconds. + Weight::from_parts(9_558_399, 247) + // Standard Error: 56 + .saturating_add(Weight::from_parts(483, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_389_000 picoseconds. - Weight::from_parts(11_625_480, 247) - // Standard Error: 315 - .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(n.into())) + // Minimum execution time: 8_328_000 picoseconds. 
+ Weight::from_parts(9_120_157, 247) + // Standard Error: 58 + .saturating_add(Weight::from_parts(1_637, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 7_947_000 picoseconds. - Weight::from_parts(10_970_587, 247) - // Standard Error: 310 - .saturating_add(Weight::from_parts(3_675, 0).saturating_mul(n.into())) + // Minimum execution time: 7_977_000 picoseconds. + Weight::from_parts(8_582_869, 247) + // Standard Error: 52 + .saturating_add(Weight::from_parts(854, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_071_000 picoseconds. - Weight::from_parts(12_525_027, 247) - // Standard Error: 328 - .saturating_add(Weight::from_parts(4_427, 0).saturating_mul(n.into())) + // Minimum execution time: 9_193_000 picoseconds. 
+ Weight::from_parts(10_112_966, 247) + // Standard Error: 63 + .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1671,89 +1536,87 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_487_000 picoseconds. - Weight::from_parts(1_611_000, 0) + // Minimum execution time: 1_398_000 picoseconds. + Weight::from_parts(1_490_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_852_000 picoseconds. - Weight::from_parts(1_982_000, 0) + // Minimum execution time: 1_762_000 picoseconds. + Weight::from_parts(1_926_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_467_000 picoseconds. - Weight::from_parts(1_529_000, 0) + // Minimum execution time: 1_413_000 picoseconds. + Weight::from_parts(1_494_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_630_000 picoseconds. - Weight::from_parts(1_712_000, 0) + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_659_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_188_000 picoseconds. - Weight::from_parts(1_268_000, 0) + // Minimum execution time: 1_010_000 picoseconds. + Weight::from_parts(1_117_000, 0) } - /// The range of component `n` is `[0, 448]`. - /// The range of component `o` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. + /// The range of component `o` is `[0, 512]`. 
fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_197_000 picoseconds. - Weight::from_parts(2_464_654, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(296, 0).saturating_mul(n.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(342, 0).saturating_mul(o.into())) + // Minimum execution time: 2_194_000 picoseconds. + Weight::from_parts(2_290_633, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(341, 0).saturating_mul(n.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(o.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_005_000 picoseconds. - Weight::from_parts(2_381_053, 0) - // Standard Error: 23 - .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) + // Minimum execution time: 1_896_000 picoseconds. + Weight::from_parts(2_254_323, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(439, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_853_000 picoseconds. - Weight::from_parts(2_082_772, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) + // Minimum execution time: 1_800_000 picoseconds. + Weight::from_parts(1_948_552, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(360, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. + /// The range of component `n` is `[0, 512]`. 
fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_711_000 picoseconds. - Weight::from_parts(1_899_649, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + // Minimum execution time: 1_615_000 picoseconds. + Weight::from_parts(1_812_731, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(177, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 448]`. - fn seal_take_transient_storage(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 512]`. + fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_684_364, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(56, 0).saturating_mul(n.into())) + // Minimum execution time: 2_430_000 picoseconds. + Weight::from_parts(2_669_757, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1763,38 +1626,31 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1292 + t * (203 ±0)` - // Estimated: `4757 + t * (2480 ±0)` - // Minimum execution time: 40_031_000 picoseconds. 
- Weight::from_parts(41_527_691, 4757) - // Standard Error: 50_351 - .saturating_add(Weight::from_parts(1_112_950, 0).saturating_mul(t.into())) + // Measured: `1292 + t * (103 ±0)` + // Estimated: `4757 + t * (103 ±0)` + // Minimum execution time: 37_280_000 picoseconds. + Weight::from_parts(41_639_379, 4757) // Standard Error: 0 - .saturating_add(Weight::from_parts(1, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2480).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 103).saturating_mul(t.into())) } - /// Storage: `Revive::ContractInfoOf` (r:1 w:0) - /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1237` - // Estimated: `4702` - // Minimum execution time: 35_759_000 picoseconds. - Weight::from_parts(37_086_000, 4702) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `1064` + // Estimated: `4529` + // Minimum execution time: 27_564_000 picoseconds. + Weight::from_parts(28_809_000, 4529) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -1807,12 +1663,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. 
fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1271` - // Estimated: `4710` - // Minimum execution time: 116_485_000 picoseconds. - Weight::from_parts(108_907_717, 4710) - // Standard Error: 12 - .saturating_add(Weight::from_parts(4_125, 0).saturating_mul(i.into())) + // Measured: `1273` + // Estimated: `4732` + // Minimum execution time: 115_581_000 picoseconds. + Weight::from_parts(105_196_218, 4732) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_134, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1821,64 +1677,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 651_000 picoseconds. - Weight::from_parts(3_867_609, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(3_425_431, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_461, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_090_000 picoseconds. - Weight::from_parts(5_338_460, 0) + // Minimum execution time: 1_113_000 picoseconds. + Weight::from_parts(4_611_854, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_601, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_652, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 717_000 picoseconds. 
- Weight::from_parts(2_629_461, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(3_872_321, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_584, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(4_807_814, 0) + // Minimum execution time: 559_000 picoseconds. + Weight::from_parts(4_721_584, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_509, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_570, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 42_829_000 picoseconds. - Weight::from_parts(24_650_992, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(5_212, 0).saturating_mul(n.into())) + // Minimum execution time: 47_467_000 picoseconds. + Weight::from_parts(36_639_352, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_216, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_902_000 picoseconds. - Weight::from_parts(48_072_000, 0) + // Minimum execution time: 48_106_000 picoseconds. + Weight::from_parts(49_352_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_713_000 picoseconds. - Weight::from_parts(12_847_000, 0) + // Minimum execution time: 12_616_000 picoseconds. 
+ Weight::from_parts(12_796_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -1886,8 +1742,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `300` // Estimated: `3765` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_419_000, 3765) + // Minimum execution time: 14_055_000 picoseconds. + Weight::from_parts(14_526_000, 3765) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1895,10 +1751,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `338` - // Estimated: `3803` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(14_209_000, 3803) + // Measured: `337` + // Estimated: `3802` + // Minimum execution time: 10_338_000 picoseconds. + Weight::from_parts(10_677_000, 3802) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1906,10 +1762,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `338` + // Measured: `337` // Estimated: `3561` - // Minimum execution time: 12_341_000 picoseconds. - Weight::from_parts(13_011_000, 3561) + // Minimum execution time: 8_740_000 picoseconds. + Weight::from_parts(9_329_000, 3561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1918,9 +1774,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_899_000 picoseconds. 
- Weight::from_parts(10_489_171, 0) - // Standard Error: 104 - .saturating_add(Weight::from_parts(73_814, 0).saturating_mul(r.into())) + // Minimum execution time: 7_846_000 picoseconds. + Weight::from_parts(9_717_991, 0) + // Standard Error: 49 + .saturating_add(Weight::from_parts(72_062, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 7241d667fcdc..0c7461a35d69 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -12,23 +12,20 @@ description = "Exposes all the host functions that a contract can import." workspace = true [dependencies] +paste = { workspace = true } bitflags = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } codec = { features = [ "derive", "max-encoded-len", ], optional = true, workspace = true } -pallet-revive-proc-macro = { workspace = true } -paste = { workspace = true } -scale-info = { features = ["derive"], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv64")'.dependencies] -polkavm-derive = { version = "0.18.0" } +[target.'cfg(target_arch = "riscv32")'.dependencies] +polkavm-derive = { version = "0.14.0" } [package.metadata.docs.rs] -features = ["unstable-hostfn"] -targets = ["riscv64imac-unknown-none-elf"] +default-target = ["wasm32-unknown-unknown"] [features] default = ["scale"] scale = ["dep:codec", "scale-info"] -unstable-hostfn = [] diff --git a/substrate/frame/revive/uapi/src/flags.rs b/substrate/frame/revive/uapi/src/flags.rs index 6a0f47c38c2c..763a89d6c030 100644 --- a/substrate/frame/revive/uapi/src/flags.rs +++ b/substrate/frame/revive/uapi/src/flags.rs @@ -38,7 +38,7 @@ bitflags! { /// /// A forwarding call will consume the current contracts input. Any attempt to /// access the input after this call returns will lead to [`Error::InputForwarded`]. 
- /// It does not matter if this is due to calling `call_data_copy` or trying another + /// It does not matter if this is due to calling `seal_input` or trying another /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve /// the input. const FORWARD_INPUT = 0b0000_0001; diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index eced4843b552..cb52cf93540b 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -12,10 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; -use pallet_revive_proc_macro::unstable_hostfn; - -#[cfg(target_arch = "riscv64")] -mod riscv64; +use paste::paste; + +#[cfg(target_arch = "riscv32")] +mod riscv32; + +macro_rules! hash_fn { + ( $name:ident, $bytes:literal ) => { + paste! { + #[doc = "Computes the " $name " " $bytes "-bit hash on the given input buffer."] + #[doc = "\n# Notes\n"] + #[doc = "- The `input` and `output` buffer may overlap."] + #[doc = "- The output buffer is expected to hold at least " $bytes " bits."] + #[doc = "- It is the callers responsibility to provide an output buffer that is large enough to hold the expected amount of bytes returned by the hash function."] + #[doc = "\n# Parameters\n"] + #[doc = "- `input`: The input data buffer."] + #[doc = "- `output`: The output buffer to write the hash result to."] + fn [](input: &[u8], output: &mut [u8; $bytes]); + } + }; +} /// Implements [`HostFn`] when compiled on supported architectures (RISC-V). pub enum HostFnImpl {} @@ -29,6 +45,17 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the address. fn address(output: &mut [u8; 20]); + /// Lock a new delegate dependency to the contract. 
+ /// + /// Traps if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + fn lock_delegate_dependency(code_hash: &[u8; 32]); + /// Get the contract immutable data. /// /// Traps if: @@ -71,16 +98,20 @@ pub trait HostFn: private::Sealed { /// Returns the [EIP-155](https://eips.ethereum.org/EIPS/eip-155) chain ID. fn chain_id(output: &mut [u8; 32]); - /// Returns the price per ref_time, akin to the EVM - /// [GASPRICE](https://www.evm.codes/?fork=cancun#3a) opcode. - fn gas_price() -> u64; - - /// Returns the base fee, akin to the EVM - /// [BASEFEE](https://www.evm.codes/?fork=cancun#48) opcode. - fn base_fee(output: &mut [u8; 32]); + /// Stores the current block number of the current contract into the supplied buffer. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block number. + fn block_number(output: &mut [u8; 32]); - /// Returns the call data size. - fn call_data_size() -> u64; + /// Stores the block hash of the given block number into the supplied buffer. + /// + /// # Parameters + /// + /// - `block_number`: A reference to the block number buffer. + /// - `output`: A reference to the output data buffer to write the block number. + fn block_hash(block_number: &[u8; 32], output: &mut [u8; 32]); /// Call (possibly transferring some amount of funds) into the specified account. /// @@ -104,316 +135,20 @@ pub trait HostFn: private::Sealed { /// An error means that the call wasn't successful output buffer is returned unless /// stated otherwise. /// - /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. 
- /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] - fn call( - flags: CallFlags, - callee: &[u8; 20], - ref_time_limit: u64, - proof_size_limit: u64, - deposit: Option<&[u8; 32]>, - value: &[u8; 32], - input_data: &[u8], - output: Option<&mut &mut [u8]>, - ) -> Result; - - /// Stores the address of the caller into the supplied buffer. - /// - /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then - /// the address of the contract will be returned. - /// - /// If there is no address associated with the caller (e.g. because the caller is root) then - /// it traps with `BadOrigin`. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the caller address. - fn caller(output: &mut [u8; 20]); - - /// Stores the origin address (initator of the call stack) into the supplied buffer. - /// - /// If there is no address associated with the origin (e.g. because the origin is root) then - /// it traps with `BadOrigin`. This can only happen through on-chain governance actions or - /// customized runtimes. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the origin's address. - fn origin(output: &mut [u8; 20]); - - /// Retrieve the code hash for a specified contract address. - /// - /// # Parameters - /// - /// - `addr`: The address of the contract. - /// - `output`: A reference to the output data buffer to write the code hash. - /// - /// # Note - /// - /// If `addr` is not a contract but the account exists then the hash of empty data - /// `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` is written, - /// otherwise `zero`. 
- fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]); - - /// Returns the code size for a specified contract address. - /// - /// # Parameters - /// - /// - `addr`: The address of the contract. - /// - /// # Note - /// - /// If `addr` is not a contract the `output` will be zero. - fn code_size(addr: &[u8; 20]) -> u64; - - /// Execute code in the context (storage, caller, value) of the current contract. - /// - /// Reentrancy protection is always disabled since the callee is allowed - /// to modify the callers storage. This makes going through a reentrancy attack - /// unnecessary for the callee when it wants to exploit the caller. - /// - /// # Parameters - /// - /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. - /// - `address`: The address of the code to be executed. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit_limit`: The storage deposit limit for delegate call. Passing `None` means setting - /// no specific limit for the call, which implies storage usage up to the limit of the parent - /// call. - /// - `input`: The input data buffer used to call the contract. - /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` - /// is provided then the output buffer is not copied. - /// - /// # Errors - /// - /// An error means that the call wasn't successful and no output buffer is returned unless - /// stated otherwise. - /// - /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. 
- /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] - fn delegate_call( - flags: CallFlags, - address: &[u8; 20], - ref_time_limit: u64, - proof_size_limit: u64, - deposit_limit: Option<&[u8; 32]>, - input_data: &[u8], - output: Option<&mut &mut [u8]>, - ) -> Result; - - /// Deposit a contract event with the data buffer and optional list of topics. There is a limit - /// on the maximum number of topics specified by `event_topics`. - /// - /// There should not be any duplicates in `topics`. - /// - /// # Parameters - /// - /// - `topics`: The topics list. It can't contain duplicates. - fn deposit_event(topics: &[[u8; 32]], data: &[u8]); - - /// Retrieve the value under the given key from storage. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - `key`: The storage key. - /// - `output`: A reference to the output data buffer to write the storage entry. - /// - /// # Errors - /// - /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] - fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; - - /// Computes the keccak_256 32-bit hash on the given input buffer. - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bits. - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the hash function. - /// - /// # Parameters - /// - /// - `input`: The input data buffer. - /// - `output`: The output buffer to write the hash result to. - fn hash_keccak_256(input: &[u8], output: &mut [u8; 32]); - - /// Stores the input data passed by the caller into the supplied `output` buffer, - /// starting from the given input data `offset`. 
- /// - /// The `output` buffer is guaranteed to always be fully populated: - /// - If the call data (starting from the given `offset`) is larger than the `output` buffer, - /// only what fits into the `output` buffer is written. - /// - If the `output` buffer size exceeds the call data size (starting from `offset`), remaining - /// bytes in the `output` buffer are zeroed out. - /// - If the provided call data `offset` is out-of-bounds, the whole `output` buffer is zeroed - /// out. - /// - /// # Note - /// - /// This function traps if: - /// - the input was previously forwarded by a [`call()`][`Self::call()`]. - /// - the `output` buffer is located in an PolkaVM invalid memory range. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the call data. - /// - `offset`: The offset index into the call data from where to start copying. - fn call_data_copy(output: &mut [u8], offset: u32); - - /// Stores the U256 value at given `offset` from the input passed by the caller - /// into the supplied buffer. - /// - /// # Note - /// - If `offset` is out of bounds, a value of zero will be returned. - /// - If `offset` is in bounds but there is not enough call data, the available data - /// is right-padded in order to fill a whole U256 value. - /// - The data written to `output` is a little endian U256 integer value. - /// - /// # Parameters - /// - /// - `output`: A reference to the fixed output data buffer to write the value. - /// - `offset`: The offset (index) into the call data. - fn call_data_load(output: &mut [u8; 32], offset: u32); - - /// Instantiate a contract with the specified code hash. - /// - /// This function creates an account and executes the constructor defined in the code specified - /// by the code hash. - /// - /// # Parameters - /// - /// - `code_hash`: The hash of the code to be instantiated. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. 
- /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no - /// specific limit for the call, which implies storage usage up to the limit of the parent - /// call. - /// - `value`: The value to transfer into the contract. - /// - `input`: The input data buffer. - /// - `address`: A reference to the address buffer to write the address of the contract. If - /// `None` is provided then the output buffer is not copied. - /// - `output`: A reference to the return value buffer to write the constructor output buffer. - /// If `None` is provided then the output buffer is not copied. - /// - `salt`: The salt bytes to use for this instantiation. - /// - /// # Errors - /// - /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more - /// information on those errors. Here we only note things specific to this function. - /// - /// An error means that the account wasn't created and no address or output buffer - /// is returned unless stated otherwise. - /// - /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. - /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] - fn instantiate( - code_hash: &[u8; 32], - ref_time_limit: u64, - proof_size_limit: u64, - deposit: Option<&[u8; 32]>, - value: &[u8; 32], - input: &[u8], - address: Option<&mut [u8; 20]>, - output: Option<&mut &mut [u8]>, - salt: Option<&[u8; 32]>, - ) -> Result; - - /// Load the latest block timestamp into the supplied buffer - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the timestamp. - fn now(output: &mut [u8; 32]); - - /// Returns the block ref_time limit. 
- fn gas_limit() -> u64; - - /// Cease contract execution and save a data buffer as a result of the execution. - /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. Returning from - /// execution without calling this function is equivalent to calling: - /// ```nocompile - /// return_value(ReturnFlags::empty(), &[]) - /// ``` - /// - /// Using an unnamed non empty `ReturnFlags` triggers a trap. - /// - /// # Parameters - /// - /// - `flags`: Flag used to signal special return conditions to the supervisor. See - /// [`ReturnFlags`] for a documentation of the supported flags. - /// - `return_value`: The return value buffer. - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; - - /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. - /// - /// # Parameters - /// - /// - `key`: The storage key. - /// - `encoded_value`: The storage value. - /// - /// # Return - /// - /// Returns the size of the pre-existing value at the specified key if any. - fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; - - /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the transferred value. - fn value_transferred(output: &mut [u8; 32]); - - /// Stores the price for the specified amount of gas into the supplied buffer. - /// - /// # Parameters - /// - /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. - /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. - /// - `output`: A reference to the output data buffer to write the price. 
- fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); - - /// Returns the size of the returned data of the last contract call or instantiation. - fn return_data_size() -> u64; - - /// Stores the returned data of the last contract call or contract instantiation. - /// - /// # Parameters - /// - `output`: A reference to the output buffer to write the data. - /// - `offset`: Byte offset into the returned data - fn return_data_copy(output: &mut &mut [u8], offset: u32); - - /// Returns the amount of ref_time left. - fn ref_time_left() -> u64; - - /// Stores the current block number of the current contract into the supplied buffer. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the block number. - #[unstable_hostfn] - fn block_number(output: &mut [u8; 32]); - - /// Stores the block hash of the given block number into the supplied buffer. - /// - /// # Parameters - /// - /// - `block_number`: A reference to the block number buffer. - /// - `output`: A reference to the output data buffer to write the block number. - #[unstable_hostfn] - fn block_hash(block_number: &[u8; 32], output: &mut [u8; 32]); + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] + fn call( + flags: CallFlags, + callee: &[u8; 20], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8; 32]>, + value: &[u8; 32], + input_data: &[u8], + output: Option<&mut &mut [u8]>, + ) -> Result; /// Call into the chain extension provided by the chain if any. /// @@ -437,7 +172,6 @@ pub trait HostFn: private::Sealed { /// # Return /// /// The chain extension returned value, if executed successfully. 
- #[unstable_hostfn] fn call_chain_extension(func_id: u32, input: &[u8], output: Option<&mut &mut [u8]>) -> u32; /// Call some dispatchable of the runtime. @@ -464,9 +198,33 @@ pub trait HostFn: private::Sealed { /// - Provide functionality **exclusively** to contracts. /// - Provide custom weights. /// - Avoid the need to keep the `Call` data structure stable. - #[unstable_hostfn] fn call_runtime(call: &[u8]) -> Result; + /// Stores the address of the caller into the supplied buffer. + /// + /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then + /// the address of the contract will be returned. + /// + /// If there is no address associated with the caller (e.g. because the caller is root) then + /// it traps with `BadOrigin`. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the caller address. + fn caller(output: &mut [u8; 20]); + + /// Stores the origin address (initiator of the call stack) into the supplied buffer. + /// + /// If there is no address associated with the origin (e.g. because the origin is root) then + /// it traps with `BadOrigin`. This can only happen through on-chain governance actions or + /// customized runtimes. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the origin's address. + fn origin(output: &mut [u8; 20]); + /// Checks whether the caller of the current contract is the origin of the whole call stack. /// /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract @@ -477,7 +235,6 @@ pub trait HostFn: private::Sealed { /// /// A return value of `true` indicates that this contract is being called by a plain account /// and `false` indicates that the caller is another contract. 
- #[unstable_hostfn] fn caller_is_origin() -> bool; /// Checks whether the caller of the current contract is root. @@ -487,7 +244,6 @@ pub trait HostFn: private::Sealed { /// /// A return value of `true` indicates that this contract is being called by a root origin, /// and `false` indicates that the caller is a signed origin. - #[unstable_hostfn] fn caller_is_root() -> u32; /// Clear the value at the given key in the contract storage. @@ -499,9 +255,34 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns the size of the pre-existing value at the specified key if any. - #[unstable_hostfn] fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option; + /// Retrieve the code hash for a specified contract address. + /// + /// # Parameters + /// + /// - `addr`: The address of the contract. + /// - `output`: A reference to the output data buffer to write the code hash. + /// + /// # Note + /// + /// If `addr` is not a contract but the account exists then the hash of empty data + /// `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` is written, + /// otherwise `zero`. + fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]); + + /// Retrieve the code size for a specified contract address. + /// + /// # Parameters + /// + /// - `addr`: The address of the contract. + /// - `output`: A reference to the output data buffer to write the code size. + /// + /// # Note + /// + /// If `addr` is not a contract the `output` will be zero. + fn code_size(addr: &[u8; 20], output: &mut [u8; 32]); + /// Checks whether there is a value stored under the given key. /// /// The key length must not exceed the maximum defined by the contracts module parameter. @@ -512,7 +293,6 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns the size of the pre-existing value at the specified key if any. - #[unstable_hostfn] fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option; /// Emit a custom debug message. 
@@ -532,9 +312,47 @@ pub trait HostFn: private::Sealed { /// not being executed as an RPC. For example, they could allow users to disable logging /// through compile time flags (cargo features) for on-chain deployment. Additionally, the /// return value of this function can be cached in order to prevent further calls at runtime. - #[unstable_hostfn] fn debug_message(str: &[u8]) -> Result; + /// Execute code in the context (storage, caller, value) of the current contract. + /// + /// Reentrancy protection is always disabled since the callee is allowed + /// to modify the callers storage. This makes going through a reentrancy attack + /// unnecessary for the callee when it wants to exploit the caller. + /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `code_hash`: The hash of the code to be executed. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful and no output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn delegate_call( + flags: CallFlags, + code_hash: &[u8; 32], + input_data: &[u8], + output: Option<&mut &mut [u8]>, + ) -> Result; + + /// Deposit a contract event with the data buffer and optional list of topics. There is a limit + /// on the maximum number of topics specified by `event_topics`. + /// + /// There should not be any duplicates in `topics`. + /// + /// # Parameters + /// + /// - `topics`: The topics list. It can't contain duplicates. 
+ fn deposit_event(topics: &[[u8; 32]], data: &[u8]); + /// Recovers the ECDSA public key from the given message hash and signature. /// /// Writes the public key into the given output buffer. @@ -549,7 +367,6 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] - #[unstable_hostfn] fn ecdsa_recover( signature: &[u8; 65], message_hash: &[u8; 32], @@ -567,49 +384,93 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] - #[unstable_hostfn] fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; - /// Computes the sha2_256 32-bit hash on the given input buffer. + /// Stores the amount of weight left into the supplied buffer. + /// The data is encoded as Weight. /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bits. - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the hash function. + /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// - /// - `input`: The input data buffer. - /// - `output`: The output buffer to write the hash result to. - #[unstable_hostfn] - fn hash_sha2_256(input: &[u8], output: &mut [u8; 32]); + /// - `output`: A reference to the output data buffer to write the weight left. + fn weight_left(output: &mut &mut [u8]); - /// Computes the blake2_256 32-bit hash on the given input buffer. + /// Retrieve the value under the given key from storage. /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bits. - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the hash function. 
+ /// The key length must not exceed the maximum defined by the contracts module parameter. /// /// # Parameters - /// */ - /// - `input`: The input data buffer. - /// - `output`: The output buffer to write the hash result to. - #[unstable_hostfn] - fn hash_blake2_256(input: &[u8], output: &mut [u8; 32]); + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; + + hash_fn!(sha2_256, 32); + hash_fn!(keccak_256, 32); + hash_fn!(blake2_256, 32); + hash_fn!(blake2_128, 16); + + /// Stores the input passed by the caller into the supplied buffer. + /// + /// # Note + /// + /// This function traps if: + /// - the input is larger than the available space. + /// - the input was previously forwarded by a [`call()`][`Self::call()`]. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the input data. + fn input(output: &mut &mut [u8]); - /// Computes the blake2_128 16-bit hash on the given input buffer. + /// Instantiate a contract with the specified code hash. + /// + /// This function creates an account and executes the constructor defined in the code specified + /// by the code hash. /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 16 bits. - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the hash function. /// # Parameters /// + /// - `code_hash`: The hash of the code to be instantiated. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. 
Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer. - /// - `output`: The output buffer to write the hash result to. - #[unstable_hostfn] - fn hash_blake2_128(input: &[u8], output: &mut [u8; 16]); + /// - `address`: A reference to the address buffer to write the address of the contract. If + /// `None` is provided then the output buffer is not copied. + /// - `output`: A reference to the return value buffer to write the constructor output buffer. + /// If `None` is provided then the output buffer is not copied. + /// - `salt`: The salt bytes to use for this instantiation. + /// + /// # Errors + /// + /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more + /// information on those errors. Here we only note things specific to this function. + /// + /// An error means that the account wasn't created and no address or output buffer + /// is returned unless stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn instantiate( + code_hash: &[u8; 32], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8; 32]>, + value: &[u8; 32], + input: &[u8], + address: Option<&mut [u8; 20]>, + output: Option<&mut &mut [u8]>, + salt: Option<&[u8; 32]>, + ) -> Result; /// Checks whether a specified address belongs to a contract. /// @@ -620,27 +481,13 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns `true` if the address belongs to a contract. - #[unstable_hostfn] fn is_contract(address: &[u8; 20]) -> bool; - /// Lock a new delegate dependency to the contract. 
- /// - /// Traps if the maximum number of delegate_dependencies is reached or if - /// the delegate dependency already exists. - /// - /// # Parameters - /// - /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps - /// otherwise. - #[unstable_hostfn] - fn lock_delegate_dependency(code_hash: &[u8; 32]); - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the minimum balance. - #[unstable_hostfn] fn minimum_balance(output: &mut [u8; 32]); /// Retrieve the code hash of the currently executing contract. @@ -648,9 +495,43 @@ pub trait HostFn: private::Sealed { /// # Parameters /// /// - `output`: A reference to the output data buffer to write the code hash. - #[unstable_hostfn] fn own_code_hash(output: &mut [u8; 32]); + /// Load the latest block timestamp into the supplied buffer + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the timestamp. + fn now(output: &mut [u8; 32]); + + /// Removes the delegate dependency from the contract. + /// + /// Traps if the delegate dependency does not exist. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + fn unlock_delegate_dependency(code_hash: &[u8; 32]); + + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. Returning from + /// execution without calling this function is equivalent to calling: + /// ```nocompile + /// return_value(ReturnFlags::empty(), &[]) + /// ``` + /// + /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// + /// # Parameters + /// + /// - `flags`: Flag used to signal special return conditions to the supervisor. 
See + /// [`ReturnFlags`] for a documentation of the supported flags. + /// - `return_value`: The return value buffer. + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + /// Replace the contract code at the specified address with new code. /// /// # Note @@ -676,11 +557,25 @@ pub trait HostFn: private::Sealed { /// - `code_hash`: The hash of the new code. Should be decodable as an `T::Hash`. Traps /// otherwise. /// - /// # Panics + /// # Errors + /// + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn set_code_hash(code_hash: &[u8; 32]) -> Result; + + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. + /// + /// # Return /// - /// Panics if there is no code on-chain with the specified hash. - #[unstable_hostfn] - fn set_code_hash(code_hash: &[u8; 32]); + /// Returns the size of the pre-existing value at the specified key if any. + fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; /// Verify a sr25519 signature /// @@ -692,7 +587,6 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [Sr25519VerifyFailed][`crate::ReturnErrorCode::Sr25519VerifyFailed] - #[unstable_hostfn] fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result; /// Retrieve and remove the value under the given key from storage. @@ -704,7 +598,6 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] - #[unstable_hostfn] fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; /// Remove the calling account and transfer remaining **free** balance. @@ -722,30 +615,23 @@ pub trait HostFn: private::Sealed { /// - The contract is live i.e is already on the call stack. 
/// - Failed to send the balance to the beneficiary. /// - The deletion queue is full. - #[unstable_hostfn] fn terminate(beneficiary: &[u8; 20]) -> !; - /// Removes the delegate dependency from the contract. - /// - /// Traps if the delegate dependency does not exist. + /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// /// # Parameters /// - /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps - /// otherwise. - #[unstable_hostfn] - fn unlock_delegate_dependency(code_hash: &[u8; 32]); + /// - `output`: A reference to the output data buffer to write the transferred value. + fn value_transferred(output: &mut [u8; 32]); - /// Stores the amount of weight left into the supplied buffer. - /// The data is encoded as Weight. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. + /// Stores the price for the specified amount of gas into the supplied buffer. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the weight left. - #[unstable_hostfn] - fn weight_left(output: &mut &mut [u8]); + /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. + /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. + /// - `output`: A reference to the output data buffer to write the price. + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); /// Execute an XCM program locally, using the contract's address as the origin. /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that @@ -761,7 +647,6 @@ pub trait HostFn: private::Sealed { /// /// Returns `Error::Success` when the XCM execution attempt is successful. 
When the XCM /// execution fails, `ReturnCode::XcmExecutionFailed` is returned - #[unstable_hostfn] fn xcm_execute(msg: &[u8]) -> Result; /// Send an XCM program from the contract to the specified destination. @@ -779,8 +664,21 @@ pub trait HostFn: private::Sealed { /// /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM /// execution fails, `ReturnErrorCode::XcmSendFailed` is returned. - #[unstable_hostfn] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result; + + /// Stores the size of the returned data of the last contract call or instantiation. + /// + /// # Parameters + /// + /// - `output`: A reference to the output buffer to write the size. + fn return_data_size(output: &mut [u8; 32]); + + /// Stores the returned data of the last contract call or contract instantiation. + /// + /// # Parameters + /// - `output`: A reference to the output buffer to write the data. + /// - `offset`: Byte offset into the returned data + fn return_data_copy(output: &mut &mut [u8], offset: u32); } mod private { diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv32.rs similarity index 72% rename from substrate/frame/revive/uapi/src/host/riscv64.rs rename to substrate/frame/revive/uapi/src/host/riscv32.rs index 6fdda86892d5..199a0abc3ddc 100644 --- a/substrate/frame/revive/uapi/src/host/riscv64.rs +++ b/substrate/frame/revive/uapi/src/host/riscv32.rs @@ -18,7 +18,6 @@ use crate::{ host::{CallFlags, HostFn, HostFnImpl, Result, StorageFlags}, ReturnFlags, }; -use pallet_revive_proc_macro::unstable_hostfn; mod sys { use crate::ReturnCode; @@ -27,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u64,); + type Regs = (u32,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0 as _) + ReturnCode(a0) } } @@ -60,24 +59,29 @@ mod sys { out_len_ptr: *mut u32, ) -> ReturnCode; pub fn call(ptr: *const u8) -> ReturnCode; - pub fn delegate_call(ptr: 
*const u8) -> ReturnCode; + pub fn delegate_call( + flags: u32, + code_hash_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; pub fn instantiate(ptr: *const u8) -> ReturnCode; pub fn terminate(beneficiary_ptr: *const u8); - pub fn call_data_copy(out_ptr: *mut u8, out_len: u32, offset: u32); - pub fn call_data_load(out_ptr: *mut u8, offset: u32); + pub fn input(out_ptr: *mut u8, out_len_ptr: *mut u32); pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32); pub fn caller(out_ptr: *mut u8); pub fn origin(out_ptr: *mut u8); pub fn is_contract(account_ptr: *const u8) -> ReturnCode; pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8); - pub fn code_size(address_ptr: *const u8) -> u64; + pub fn code_size(address_ptr: *const u8, out_ptr: *mut u8); pub fn own_code_hash(out_ptr: *mut u8); pub fn caller_is_origin() -> ReturnCode; pub fn caller_is_root() -> ReturnCode; pub fn address(out_ptr: *mut u8); pub fn weight_to_fee(ref_time: u64, proof_size: u64, out_ptr: *mut u8); pub fn weight_left(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn ref_time_left() -> u64; pub fn get_immutable_data(out_ptr: *mut u8, out_len_ptr: *mut u32); pub fn set_immutable_data(ptr: *const u8, len: u32); pub fn balance(out_ptr: *mut u8); @@ -85,7 +89,6 @@ mod sys { pub fn chain_id(out_ptr: *mut u8); pub fn value_transferred(out_ptr: *mut u8); pub fn now(out_ptr: *mut u8); - pub fn gas_limit() -> u64; pub fn minimum_balance(out_ptr: *mut u8); pub fn deposit_event( topics_ptr: *const [u8; 32], @@ -93,9 +96,6 @@ mod sys { data_ptr: *const u8, data_len: u32, ); - pub fn gas_price() -> u64; - pub fn base_fee(out_ptr: *mut u8); - pub fn call_data_size() -> u64; pub fn block_number(out_ptr: *mut u8); pub fn block_hash(block_number_ptr: *const u8, out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -122,7 +122,7 @@ mod sys { message_len: u32, message_ptr: 
*const u8, ) -> ReturnCode; - pub fn set_code_hash(code_hash_ptr: *const u8); + pub fn set_code_hash(code_hash_ptr: *const u8) -> ReturnCode; pub fn ecdsa_to_eth_address(key_ptr: *const u8, out_ptr: *mut u8) -> ReturnCode; pub fn instantiation_nonce() -> u64; pub fn lock_delegate_dependency(code_hash_ptr: *const u8); @@ -135,11 +135,43 @@ mod sys { msg_len: u32, out_ptr: *mut u8, ) -> ReturnCode; - pub fn return_data_size() -> u64; + pub fn return_data_size(out_ptr: *mut u8); pub fn return_data_copy(out_ptr: *mut u8, out_len_ptr: *mut u32, offset: u32); } } +/// A macro to implement all Host functions with a signature of `fn(&mut [u8; n])`. +macro_rules! impl_wrapper_for { + (@impl_fn $name:ident, $n: literal) => { + fn $name(output: &mut [u8; $n]) { + unsafe { sys::$name(output.as_mut_ptr()) } + } + }; + + () => {}; + + ([u8; $n: literal] => $($name:ident),*; $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn $name, $n);)* + impl_wrapper_for!($($tail)*); + }; +} + +macro_rules! impl_hash_fn { + ( $name:ident, $bytes_result:literal ) => { + paste::item! 
{ + fn [](input: &[u8], output: &mut [u8; $bytes_result]) { + unsafe { + sys::[]( + input.as_ptr(), + input.len() as u32, + output.as_mut_ptr(), + ) + } + } + } + }; +} + #[inline(always)] fn extract_from_slice(output: &mut &mut [u8], new_len: usize) { debug_assert!(new_len <= output.len()); @@ -182,33 +214,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(C)] + #[repr(packed)] #[allow(dead_code)] struct Args { - code_hash: u32, + code_hash: *const u8, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: u32, - value: u32, - input: u32, + deposit_limit: *const u8, + value: *const u8, + input: *const u8, input_len: u32, - address: u32, - output: u32, - output_len: u32, - salt: u32, + address: *const u8, + output: *mut u8, + output_len: *mut u32, + salt: *const u8, } let args = Args { - code_hash: code_hash.as_ptr() as _, + code_hash: code_hash.as_ptr(), ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr as _, - value: value.as_ptr() as _, - input: input.as_ptr() as _, + deposit_limit: deposit_limit_ptr, + value: value.as_ptr(), + input: input.as_ptr(), input_len: input.len() as _, - address: address as _, - output: output_ptr as _, - output_len: &mut output_len as *mut _ as _, - salt: salt_ptr as _, + address, + output: output_ptr, + output_len: &mut output_len as *mut _, + salt: salt_ptr, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -232,31 +264,31 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(C)] + #[repr(packed)] #[allow(dead_code)] struct Args { flags: u32, - callee: u32, + callee: *const u8, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: u32, - value: u32, - input: u32, + deposit_limit: 
*const u8, + value: *const u8, + input: *const u8, input_len: u32, - output: u32, - output_len: u32, + output: *mut u8, + output_len: *mut u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr() as _, + callee: callee.as_ptr(), ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr as _, - value: value.as_ptr() as _, - input: input.as_ptr() as _, + deposit_limit: deposit_limit_ptr, + value: value.as_ptr(), + input: input.as_ptr(), input_len: input.len() as _, - output: output_ptr as _, - output_len: &mut output_len as *mut _ as _, + output: output_ptr, + output_len: &mut output_len as *mut _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -268,44 +300,30 @@ impl HostFn for HostFnImpl { ret_code.into() } + fn caller_is_root() -> u32 { + unsafe { sys::caller_is_root() }.into_u32() + } + fn delegate_call( flags: CallFlags, - address: &[u8; 20], - ref_time_limit: u64, - proof_size_limit: u64, - deposit_limit: Option<&[u8; 32]>, + code_hash: &[u8; 32], input: &[u8], mut output: Option<&mut &mut [u8]>, ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); - let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(C)] - #[allow(dead_code)] - struct Args { - flags: u32, - address: u32, - ref_time_limit: u64, - proof_size_limit: u64, - deposit_limit: u32, - input: u32, - input_len: u32, - output: u32, - output_len: u32, - } - let args = Args { - flags: flags.bits(), - address: address.as_ptr() as _, - ref_time_limit, - proof_size_limit, - deposit_limit: deposit_limit_ptr as _, - input: input.as_ptr() as _, - input_len: input.len() as _, - output: output_ptr as _, - output_len: &mut output_len as *mut _ as _, + let ret_code = { + unsafe { + sys::delegate_call( + flags.bits(), + code_hash.as_ptr(), + input.as_ptr(), + input.len() as u32, + output_ptr, + &mut output_len, + ) + } }; - let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; - if let 
Some(ref mut output) = output { extract_from_slice(output, output_len as usize); } @@ -337,6 +355,17 @@ impl HostFn for HostFnImpl { ret_code.into() } + fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = + unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; let ret_code = { @@ -354,115 +383,33 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn call_data_load(out_ptr: &mut [u8; 32], offset: u32) { - unsafe { sys::call_data_load(out_ptr.as_mut_ptr(), offset) }; - } - - fn gas_limit() -> u64 { - unsafe { sys::gas_limit() } - } - - fn call_data_size() -> u64 { - unsafe { sys::call_data_size() } - } - - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! 
{ - unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } - panic!("seal_return does not return"); - } - - fn gas_price() -> u64 { - unsafe { sys::gas_price() } - } - - fn base_fee(output: &mut [u8; 32]) { - unsafe { sys::base_fee(output.as_mut_ptr()) } - } - - fn balance(output: &mut [u8; 32]) { - unsafe { sys::balance(output.as_mut_ptr()) } - } - - fn value_transferred(output: &mut [u8; 32]) { - unsafe { sys::value_transferred(output.as_mut_ptr()) } - } - - fn now(output: &mut [u8; 32]) { - unsafe { sys::now(output.as_mut_ptr()) } - } - - fn chain_id(output: &mut [u8; 32]) { - unsafe { sys::chain_id(output.as_mut_ptr()) } - } - - fn address(output: &mut [u8; 20]) { - unsafe { sys::address(output.as_mut_ptr()) } - } - - fn caller(output: &mut [u8; 20]) { - unsafe { sys::caller(output.as_mut_ptr()) } - } - - fn origin(output: &mut [u8; 20]) { - unsafe { sys::origin(output.as_mut_ptr()) } - } - - fn block_number(output: &mut [u8; 32]) { - unsafe { sys::block_number(output.as_mut_ptr()) } - } - - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { - unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; - } - - fn hash_keccak_256(input: &[u8], output: &mut [u8; 32]) { - unsafe { sys::hash_keccak_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } - } - - fn get_immutable_data(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { sys::get_immutable_data(output.as_mut_ptr(), &mut output_len) }; - extract_from_slice(output, output_len as usize); - } - - fn set_immutable_data(data: &[u8]) { - unsafe { sys::set_immutable_data(data.as_ptr(), data.len() as u32) } - } - - fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; - } - - fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } - } - - 
fn code_size(address: &[u8; 20]) -> u64 { - unsafe { sys::code_size(address.as_ptr()) } - } - - fn return_data_size() -> u64 { - unsafe { sys::return_data_size() } - } - - fn return_data_copy(output: &mut &mut [u8], offset: u32) { + fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; - { - unsafe { sys::return_data_copy(output.as_mut_ptr(), &mut output_len, offset) }; - } + let ret_code = { + unsafe { + sys::take_storage( + flags.bits(), + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; extract_from_slice(output, output_len as usize); + ret_code.into() } - fn ref_time_left() -> u64 { - unsafe { sys::ref_time_left() } + fn debug_message(str: &[u8]) -> Result { + let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; + ret_code.into() } - #[unstable_hostfn] - fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) { - unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) }; + fn terminate(beneficiary: &[u8; 20]) -> ! { + unsafe { sys::terminate(beneficiary.as_ptr()) } + panic!("terminate does not return"); } - #[unstable_hostfn] fn call_chain_extension(func_id: u32, input: &[u8], mut output: Option<&mut &mut [u8]>) -> u32 { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let ret_code = { @@ -483,48 +430,44 @@ impl HostFn for HostFnImpl { ret_code.into_u32() } - fn call_data_copy(output: &mut [u8], offset: u32) { - let len = output.len() as u32; - unsafe { sys::call_data_copy(output.as_mut_ptr(), len, offset) }; + fn input(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; + } + extract_from_slice(output, output_len as usize); + } + + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! 
{ + unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } + panic!("seal_return does not return"); } - #[unstable_hostfn] fn call_runtime(call: &[u8]) -> Result { let ret_code = unsafe { sys::call_runtime(call.as_ptr(), call.len() as u32) }; ret_code.into() } - #[unstable_hostfn] - fn caller_is_origin() -> bool { - let ret_val = unsafe { sys::caller_is_origin() }; - ret_val.into_bool() - } - - #[unstable_hostfn] - fn caller_is_root() -> u32 { - unsafe { sys::caller_is_root() }.into_u32() + impl_wrapper_for! { + [u8; 32] => block_number, balance, value_transferred, now, minimum_balance, chain_id; + [u8; 20] => address, caller, origin; } - #[unstable_hostfn] - fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() + fn weight_left(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } + extract_from_slice(output, output_len as usize) } - #[unstable_hostfn] - fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = - unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { + unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; } - #[unstable_hostfn] - fn debug_message(str: &[u8]) -> Result { - let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; - ret_code.into() - } + impl_hash_fn!(sha2_256, 32); + impl_hash_fn!(keccak_256, 32); + impl_hash_fn!(blake2_256, 32); + impl_hash_fn!(blake2_128, 16); - #[unstable_hostfn] fn ecdsa_recover( signature: &[u8; 65], message_hash: &[u8; 32], @@ -536,109 +479,77 @@ impl HostFn for HostFnImpl { ret_code.into() } - #[unstable_hostfn] fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut 
[u8; 20]) -> Result { let ret_code = unsafe { sys::ecdsa_to_eth_address(pubkey.as_ptr(), output.as_mut_ptr()) }; ret_code.into() } - #[unstable_hostfn] - fn hash_sha2_256(input: &[u8], output: &mut [u8; 32]) { - unsafe { sys::hash_sha2_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { + let ret_code = unsafe { + sys::sr25519_verify( + signature.as_ptr(), + pub_key.as_ptr(), + message.len() as u32, + message.as_ptr(), + ) + }; + ret_code.into() } - #[unstable_hostfn] - fn hash_blake2_256(input: &[u8], output: &mut [u8; 32]) { - unsafe { sys::hash_blake2_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } + fn is_contract(address: &[u8; 20]) -> bool { + let ret_val = unsafe { sys::is_contract(address.as_ptr()) }; + ret_val.into_bool() } - #[unstable_hostfn] - fn hash_blake2_128(input: &[u8], output: &mut [u8; 16]) { - unsafe { sys::hash_blake2_128(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } + fn get_immutable_data(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::get_immutable_data(output.as_mut_ptr(), &mut output_len) }; + extract_from_slice(output, output_len as usize); } - #[unstable_hostfn] - fn is_contract(address: &[u8; 20]) -> bool { - let ret_val = unsafe { sys::is_contract(address.as_ptr()) }; - ret_val.into_bool() + fn set_immutable_data(data: &[u8]) { + unsafe { sys::set_immutable_data(data.as_ptr(), data.len() as u32) } } - #[unstable_hostfn] - fn lock_delegate_dependency(code_hash: &[u8; 32]) { - unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } + fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; } - #[unstable_hostfn] - fn minimum_balance(output: &mut [u8; 32]) { - unsafe { sys::minimum_balance(output.as_mut_ptr()) } + fn caller_is_origin() -> bool { + let ret_val = unsafe { sys::caller_is_origin() }; + 
ret_val.into_bool() } - #[unstable_hostfn] - fn own_code_hash(output: &mut [u8; 32]) { - unsafe { sys::own_code_hash(output.as_mut_ptr()) } + fn set_code_hash(code_hash: &[u8; 32]) -> Result { + let ret_val = unsafe { sys::set_code_hash(code_hash.as_ptr()) }; + ret_val.into() } - #[unstable_hostfn] - fn set_code_hash(code_hash: &[u8; 32]) { - unsafe { sys::set_code_hash(code_hash.as_ptr()) } + fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } } - #[unstable_hostfn] - fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { - let ret_code = unsafe { - sys::sr25519_verify( - signature.as_ptr(), - pub_key.as_ptr(), - message.len() as u32, - message.as_ptr(), - ) - }; - ret_code.into() + fn code_size(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_size(address.as_ptr(), output.as_mut_ptr()) } } - #[unstable_hostfn] - fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { - let mut output_len = output.len() as u32; - let ret_code = { - unsafe { - sys::take_storage( - flags.bits(), - key.as_ptr(), - key.len() as u32, - output.as_mut_ptr(), - &mut output_len, - ) - } - }; - extract_from_slice(output, output_len as usize); - ret_code.into() + fn own_code_hash(output: &mut [u8; 32]) { + unsafe { sys::own_code_hash(output.as_mut_ptr()) } } - #[unstable_hostfn] - fn terminate(beneficiary: &[u8; 20]) -> ! 
{ - unsafe { sys::terminate(beneficiary.as_ptr()) } - panic!("terminate does not return"); + fn lock_delegate_dependency(code_hash: &[u8; 32]) { + unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } } - #[unstable_hostfn] fn unlock_delegate_dependency(code_hash: &[u8; 32]) { unsafe { sys::unlock_delegate_dependency(code_hash.as_ptr()) } } - #[unstable_hostfn] - fn weight_left(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } - extract_from_slice(output, output_len as usize) - } - - #[unstable_hostfn] fn xcm_execute(msg: &[u8]) -> Result { let ret_code = unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _) }; ret_code.into() } - #[unstable_hostfn] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result { let ret_code = unsafe { sys::xcm_send( @@ -651,4 +562,20 @@ impl HostFn for HostFnImpl { }; ret_code.into() } + + fn return_data_size(output: &mut [u8; 32]) { + unsafe { sys::return_data_size(output.as_mut_ptr()) }; + } + + fn return_data_copy(output: &mut &mut [u8], offset: u32) { + let mut output_len = output.len() as u32; + { + unsafe { sys::return_data_copy(output.as_mut_ptr(), &mut output_len, offset) }; + } + extract_from_slice(output, output_len as usize); + } + + fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) { + unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) }; + } } diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index ef1798b4bf61..e660ce36ef75 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -17,7 +17,6 @@ //! Refer to substrate FRAME contract module for more documentation. 
#![no_std] -#![cfg_attr(docsrs, feature(doc_cfg))] mod flags; pub use flags::*; @@ -66,12 +65,6 @@ impl From for u32 { } } -impl From for u64 { - fn from(error: ReturnErrorCode) -> Self { - u32::from(error).into() - } -} - define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. @@ -86,21 +79,23 @@ define_error_codes! { /// Transfer failed for other not further specified reason. Most probably /// reserved or locked balance of the sender that was preventing the transfer. TransferFailed = 4, + /// No code could be found at the supplied code hash. + CodeNotFound = 5, + /// The account that was called is no contract. + NotCallable = 6, /// The call to `debug_message` had no effect because debug message /// recording was disabled. - LoggingDisabled = 5, + LoggingDisabled = 7, /// The call dispatched by `call_runtime` was executed but returned an error. - CallRuntimeFailed = 6, + CallRuntimeFailed = 8, /// ECDSA public key recovery failed. Most probably wrong recovery id or signature. - EcdsaRecoveryFailed = 7, + EcdsaRecoveryFailed = 9, /// sr25519 signature verification failed. - Sr25519VerifyFailed = 8, + Sr25519VerifyFailed = 10, /// The `xcm_execute` call failed. - XcmExecutionFailed = 9, + XcmExecutionFailed = 11, /// The `xcm_send` call failed. - XcmSendFailed = 10, - /// The subcall ran out of weight or storage deposit. - OutOfResources = 11, + XcmSendFailed = 12, } /// The raw return code returned by the host side. 
diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index dedde9956b6f..f80fed11b971 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -29,8 +29,8 @@ sp-staking = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true } diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index fd6ffc55e40c..6531080b8d10 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -106,7 +106,7 @@ pub mod pallet { fn get_offence_details( offenders: Vec<(T::AccountId, Perbill)>, ) -> Result>, DispatchError> { - let now = pallet_staking::ActiveEra::::get() + let now = Staking::::active_era() .map(|e| e.index) .ok_or(Error::::FailedToGetActiveEra)?; diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index a27fb36f64a6..af073d7672cf 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -296,5 +296,5 @@ pub(crate) fn run_to_block(n: BlockNumber) { } pub(crate) fn active_era() -> EraIndex { - pallet_staking::ActiveEra::::get().unwrap().index + Staking::active_era().unwrap().index } diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index fd0f4da2e80c..ee3ce8011009 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = 
["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 3f1f6bc1f1d6..e7f165ae67d8 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -20,20 +20,20 @@ docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-balances = { optional = true, workspace = true } -pallet-proxy = { optional = true, workspace = true } -pallet-utility = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-runtime = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -frame-support = { features = ["experimental"], workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index aaf3456272fa..ec1ad8249514 100644 --- 
a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -138,7 +138,6 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; - type BlockNumberProvider = frame_system::Pallet; } /// The calls that can always bypass safe-mode. diff --git a/substrate/frame/safe-mode/src/weights.rs b/substrate/frame/safe-mode/src/weights.rs index 631853b19462..c2ce2cfab9b9 100644 --- a/substrate/frame/safe-mode/src/weights.rs +++ b/substrate/frame/safe-mode/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_safe_mode` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_982_000 picoseconds. - Weight::from_parts(3_104_000, 1489) + // Minimum execution time: 2_152_000 picoseconds. + Weight::from_parts(2_283_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -82,23 +82,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 7_338_000 picoseconds. - Weight::from_parts(7_813_000, 1489) + // Minimum execution time: 6_657_000 picoseconds. 
+ Weight::from_parts(6_955_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3820` - // Minimum execution time: 48_807_000 picoseconds. - Weight::from_parts(49_731_000, 3820) + // Estimated: `3658` + // Minimum execution time: 49_366_000 picoseconds. + Weight::from_parts(50_506_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -108,23 +108,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 8_207_000 picoseconds. - Weight::from_parts(8_645_000, 1489) + // Minimum execution time: 7_843_000 picoseconds. 
+ Weight::from_parts(8_205_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3820` - // Minimum execution time: 53_540_000 picoseconds. - Weight::from_parts(54_315_000, 3820) + // Estimated: `3658` + // Minimum execution time: 50_487_000 picoseconds. + Weight::from_parts(52_101_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -134,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 9_494_000 picoseconds. - Weight::from_parts(9_751_000, 1489) + // Minimum execution time: 8_517_000 picoseconds. + Weight::from_parts(8_894_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_970_000 picoseconds. - Weight::from_parts(9_318_000, 1489) + // Minimum execution time: 8_451_000 picoseconds. 
+ Weight::from_parts(8_745_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -155,39 +155,39 @@ impl WeightInfo for SubstrateWeight { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 46_187_000 picoseconds. - Weight::from_parts(47_068_000, 3820) + // Estimated: `3658` + // Minimum execution time: 42_504_000 picoseconds. + Weight::from_parts(45_493_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 44_809_000 picoseconds. - Weight::from_parts(45_501_000, 3820) + // Estimated: `3658` + // Minimum execution time: 40_864_000 picoseconds. 
+ Weight::from_parts(41_626_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 36_977_000 picoseconds. - Weight::from_parts(37_694_000, 3820) + // Estimated: `3658` + // Minimum execution time: 31_943_000 picoseconds. + Weight::from_parts(33_033_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -201,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_982_000 picoseconds. - Weight::from_parts(3_104_000, 1489) + // Minimum execution time: 2_152_000 picoseconds. + Weight::from_parts(2_283_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -211,23 +211,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 7_338_000 picoseconds. - Weight::from_parts(7_813_000, 1489) + // Minimum execution time: 6_657_000 picoseconds. 
+ Weight::from_parts(6_955_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3820` - // Minimum execution time: 48_807_000 picoseconds. - Weight::from_parts(49_731_000, 3820) + // Estimated: `3658` + // Minimum execution time: 49_366_000 picoseconds. + Weight::from_parts(50_506_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -237,23 +237,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 8_207_000 picoseconds. - Weight::from_parts(8_645_000, 1489) + // Minimum execution time: 7_843_000 picoseconds. 
+ Weight::from_parts(8_205_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3820` - // Minimum execution time: 53_540_000 picoseconds. - Weight::from_parts(54_315_000, 3820) + // Estimated: `3658` + // Minimum execution time: 50_487_000 picoseconds. + Weight::from_parts(52_101_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -263,8 +263,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 9_494_000 picoseconds. - Weight::from_parts(9_751_000, 1489) + // Minimum execution time: 8_517_000 picoseconds. + Weight::from_parts(8_894_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -274,8 +274,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_970_000 picoseconds. - Weight::from_parts(9_318_000, 1489) + // Minimum execution time: 8_451_000 picoseconds. 
+ Weight::from_parts(8_745_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -284,39 +284,39 @@ impl WeightInfo for () { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 46_187_000 picoseconds. - Weight::from_parts(47_068_000, 3820) + // Estimated: `3658` + // Minimum execution time: 42_504_000 picoseconds. + Weight::from_parts(45_493_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 44_809_000 picoseconds. - Weight::from_parts(45_501_000, 3820) + // Estimated: `3658` + // Minimum execution time: 40_864_000 picoseconds. 
+ Weight::from_parts(41_626_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3820` - // Minimum execution time: 36_977_000 picoseconds. - Weight::from_parts(37_694_000, 3820) + // Estimated: `3658` + // Minimum execution time: 31_943_000 picoseconds. + Weight::from_parts(33_033_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index b3ed95bf1de5..9e4cf06288dd 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -pallet-ranked-collective = { optional = true, workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/salary/src/tests/integration.rs 
b/substrate/frame/salary/src/tests/integration.rs index 0c1fb8bbdcba..69f218943ade 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -17,21 +17,22 @@ //! The crate's tests. -use crate as pallet_salary; -use crate::*; use frame_support::{ assert_noop, assert_ok, derive_impl, hypothetically, pallet_prelude::Weight, parameter_types, - traits::{ConstU64, EitherOf, MapSuccess, NoOpPoll}, + traits::{ConstU64, EitherOf, MapSuccess, PollStatus, Polling}, }; -use pallet_ranked_collective::{EnsureRanked, Geometric}; +use pallet_ranked_collective::{EnsureRanked, Geometric, TallyOf, Votes}; use sp_core::{ConstU16, Get}; use sp_runtime::{ traits::{Convert, ReduceBy, ReplaceWithDefault}, - BuildStorage, + BuildStorage, DispatchError, }; +use crate as pallet_salary; +use crate::*; + type Rank = u16; type Block = frame_system::mocking::MockBlock; @@ -54,6 +55,45 @@ impl frame_system::Config for Test { type Block = Block; } +pub struct TestPolls; +impl Polling> for TestPolls { + type Index = u8; + type Votes = Votes; + type Moment = u64; + type Class = Rank; + + fn classes() -> Vec { + unimplemented!() + } + fn as_ongoing(_index: u8) -> Option<(TallyOf, Self::Class)> { + unimplemented!() + } + fn access_poll( + _index: Self::Index, + _f: impl FnOnce(PollStatus<&mut TallyOf, Self::Moment, Self::Class>) -> R, + ) -> R { + unimplemented!() + } + fn try_access_poll( + _index: Self::Index, + _f: impl FnOnce( + PollStatus<&mut TallyOf, Self::Moment, Self::Class>, + ) -> Result, + ) -> Result { + unimplemented!() + } + + #[cfg(feature = "runtime-benchmarks")] + fn create_ongoing(_class: Self::Class) -> Result { + unimplemented!() + } + + #[cfg(feature = "runtime-benchmarks")] + fn end_ongoing(_index: Self::Index, _approved: bool) -> Result<(), ()> { + unimplemented!() + } +} + pub struct MinRankOfClass(PhantomData); impl> Convert for MinRankOfClass { fn convert(a: u16) -> Rank { @@ -136,7 +176,7 @@ impl 
pallet_ranked_collective::Config for Test { // Members can exchange up to the rank of 2 below them. MapSuccess, ReduceBy>>, >; - type Polls = NoOpPoll; + type Polls = TestPolls; type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = Salary; type VoteWeight = Geometric; diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs index f1cdaaa225a4..d4e6331919b6 100644 --- a/substrate/frame/salary/src/weights.rs +++ b/substrate/frame/salary/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_salary` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 7_583_000 picoseconds. - Weight::from_parts(8_073_000, 1541) + // Minimum execution time: 7_382_000 picoseconds. + Weight::from_parts(7_793_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -80,8 +80,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 9_648_000 picoseconds. - Weight::from_parts(10_016_000, 1541) + // Minimum execution time: 8_744_000 picoseconds. 
+ Weight::from_parts(9_216_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -95,8 +95,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 22_534_000 picoseconds. - Weight::from_parts(23_265_000, 3543) + // Minimum execution time: 16_728_000 picoseconds. + Weight::from_parts(17_387_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -110,8 +110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 25_764_000 picoseconds. - Weight::from_parts(26_531_000, 3543) + // Minimum execution time: 19_744_000 picoseconds. + Weight::from_parts(20_225_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -125,8 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 62_575_000 picoseconds. - Weight::from_parts(63_945_000, 3543) + // Minimum execution time: 56_084_000 picoseconds. + Weight::from_parts(58_484_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `514` + // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_043_000 picoseconds. - Weight::from_parts(65_938_000, 3593) + // Minimum execution time: 57_341_000 picoseconds. 
+ Weight::from_parts(59_882_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -155,8 +155,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_797_000, 3543) + // Minimum execution time: 10_788_000 picoseconds. + Weight::from_parts(11_109_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -170,8 +170,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 7_583_000 picoseconds. - Weight::from_parts(8_073_000, 1541) + // Minimum execution time: 7_382_000 picoseconds. + Weight::from_parts(7_793_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -181,8 +181,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 9_648_000 picoseconds. - Weight::from_parts(10_016_000, 1541) + // Minimum execution time: 8_744_000 picoseconds. + Weight::from_parts(9_216_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -196,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 22_534_000 picoseconds. - Weight::from_parts(23_265_000, 3543) + // Minimum execution time: 16_728_000 picoseconds. + Weight::from_parts(17_387_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -211,8 +211,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 25_764_000 picoseconds. 
- Weight::from_parts(26_531_000, 3543) + // Minimum execution time: 19_744_000 picoseconds. + Weight::from_parts(20_225_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -226,8 +226,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 62_575_000 picoseconds. - Weight::from_parts(63_945_000, 3543) + // Minimum execution time: 56_084_000 picoseconds. + Weight::from_parts(58_484_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -241,10 +241,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `514` + // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_043_000 picoseconds. - Weight::from_parts(65_938_000, 3593) + // Minimum execution time: 57_341_000 picoseconds. + Weight::from_parts(59_882_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_797_000, 3543) + // Minimum execution time: 10_788_000 picoseconds. 
+ Weight::from_parts(11_109_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index dd091b6f8ed7..7eb2bda96ffc 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-consensus-sassafras = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 0506470e72c3..1432ada91335 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -14,15 +14,15 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } +docify = { workspace = true } [dev-dependencies] pallet-preimage = { workspace = true, default-features = true } diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index ff40e8ef8abf..d0a14fc73d64 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ 
b/substrate/frame/scheduler/src/benchmarking.rs @@ -17,23 +17,25 @@ //! Scheduler pallet benchmarking. +use super::*; use alloc::vec; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, weights::WeightMeter, }; -use frame_system::{EventRecord, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use crate::*; - -type SystemCall = frame_system::Call; -type SystemOrigin = ::RuntimeOrigin; +use crate::Pallet as Scheduler; +use frame_system::{Call as SystemCall, EventRecord}; const SEED: u32 = 0; + const BLOCK_NUMBER: u32 = 2; +type SystemOrigin = ::RuntimeOrigin; + fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -59,7 +61,7 @@ fn fill_schedule( let call = make_call::(None); let period = Some(((i + 100).into(), 100)); let name = u32_to_name(i); - Pallet::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; } ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); Ok(()) @@ -132,160 +134,107 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { } } -#[benchmarks] -mod benchmarks { - use super::*; - +benchmarks! { // `service_agendas` when no work is done. - #[benchmark] - fn service_agendas_base() { - let now = BLOCK_NUMBER.into(); + service_agendas_base { + let now = BlockNumberFor::::from(BLOCK_NUMBER); IncompleteSince::::put(now - One::one()); - - #[block] - { - Pallet::::service_agendas(&mut WeightMeter::new(), now, 0); - } - + }: { + Scheduler::::service_agendas(&mut WeightMeter::new(), now, 0); + } verify { assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } // `service_agenda` when no work is done. 
- #[benchmark] - fn service_agenda_base( - s: Linear<0, { T::MaxScheduledPerBlock::get() }>, - ) -> Result<(), BenchmarkError> { + service_agenda_base { let now = BLOCK_NUMBER.into(); + let s in 0 .. T::MaxScheduledPerBlock::get(); fill_schedule::(now, s)?; let mut executed = 0; - - #[block] - { - Pallet::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); - } - + }: { + Scheduler::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); + } verify { assert_eq!(executed, 0); - - Ok(()) } // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - #[benchmark] - fn service_task_base() { + service_task_base { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - let _result; - - #[block] - { - _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); - } - - // assert!(_result.is_ok()); + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { + //assert_eq!(result, Ok(())); } // `service_task` when the task is a non-periodic, non-named, fetched call (with a known // preimage length) and which is not dispatched (e.g. due to being overweight). - #[benchmark(pov_mode = MaxEncodedLen { + #[pov_mode = MaxEncodedLen { // Use measured PoV size for the Preimages since we pass in a length witness. Preimage::PreimageFor: Measured - })] - fn service_task_fetched( - s: Linear<{ BoundedInline::bound() as u32 }, { T::Preimages::MAX_LENGTH as u32 }>, - ) { + }] + service_task_fetched { + let s in (BoundedInline::bound() as u32) .. 
(T::Preimages::MAX_LENGTH as u32); let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, Some(s), 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - let _result; - - #[block] - { - _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); - } - - // assert!(result.is_ok()); + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } // `service_task` when the task is a non-periodic, named, non-fetched call which is not // dispatched (e.g. due to being overweight). - #[benchmark] - fn service_task_named() { + service_task_named { let now = BLOCK_NUMBER.into(); let task = make_task::(false, true, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - let _result; - - #[block] - { - _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); - } - - // assert!(result.is_ok()); + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } // `service_task` when the task is a periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - #[benchmark] - fn service_task_periodic() { + service_task_periodic { let now = BLOCK_NUMBER.into(); let task = make_task::(true, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - let _result; - - #[block] - { - _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); - } - - // assert!(result.is_ok()); + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. 
- #[benchmark] - fn execute_dispatch_signed() -> Result<(), BenchmarkError> { + execute_dispatch_signed { let mut counter = WeightMeter::new(); let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None))?.0; - let result; - - #[block] - { - result = Pallet::::execute_dispatch(&mut counter, origin, call); - } - - assert!(result.is_ok()); - - Ok(()) + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); + } + verify { } // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. - #[benchmark] - fn execute_dispatch_unsigned() -> Result<(), BenchmarkError> { + execute_dispatch_unsigned { let mut counter = WeightMeter::new(); let origin = make_origin::(false); - let call = T::Preimages::realize(&make_call::(None))?.0; - let result; - - #[block] - { - result = Pallet::::execute_dispatch(&mut counter, origin, call); - } - - assert!(result.is_ok()); - - Ok(()) + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); + } + verify { } - #[benchmark] - fn schedule( - s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, - ) -> Result<(), BenchmarkError> { + schedule { + let s in 0 .. 
(T::MaxScheduledPerBlock::get() - 1); let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); let priority = 0; @@ -293,27 +242,24 @@ mod benchmarks { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - - #[extrinsic_call] - _(RawOrigin::Root, when, periodic, priority, call); - - ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); - - Ok(()) + }: _(RawOrigin::Root, when, periodic, priority, call) + verify { + ensure!( + Agenda::::get(when).len() == (s + 1) as usize, + "didn't add to schedule" + ); } - #[benchmark] - fn cancel(s: Linear<1, { T::MaxScheduledPerBlock::get() }>) -> Result<(), BenchmarkError> { + cancel { + let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); let schedule_origin = T::ScheduleOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(schedule_origin as SystemOrigin, when, 0); - + }: _>(schedule_origin, when, 0) + verify { ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -327,14 +273,10 @@ mod benchmarks { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); - - Ok(()) } - #[benchmark] - fn schedule_named( - s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, - ) -> Result<(), BenchmarkError> { + schedule_named { + let s in 0 .. 
(T::MaxScheduledPerBlock::get() - 1); let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); @@ -343,26 +285,21 @@ mod benchmarks { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - - #[extrinsic_call] - _(RawOrigin::Root, id, when, periodic, priority, call); - - ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); - - Ok(()) + }: _(RawOrigin::Root, id, when, periodic, priority, call) + verify { + ensure!( + Agenda::::get(when).len() == (s + 1) as usize, + "didn't add to schedule" + ); } - #[benchmark] - fn cancel_named( - s: Linear<1, { T::MaxScheduledPerBlock::get() }>, - ) -> Result<(), BenchmarkError> { + cancel_named { + let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; - - #[extrinsic_call] - _(RawOrigin::Root, u32_to_name(0)); - + }: _(RawOrigin::Root, u32_to_name(0)) + verify { ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -376,49 +313,33 @@ mod benchmarks { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); - - Ok(()) } - #[benchmark] - fn schedule_retry( - s: Linear<1, { T::MaxScheduledPerBlock::get() }>, - ) -> Result<(), BenchmarkError> { + schedule_retry { + let s in 1 .. 
T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; let name = u32_to_name(s - 1); let address = Lookup::::get(name).unwrap(); - let period: BlockNumberFor = 1_u32.into(); + let period: BlockNumberFor = 1u32.into(); + let root: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); let retry_config = RetryConfig { total_retries: 10, remaining: 10, period }; Retries::::insert(address, retry_config); let (mut when, index) = address; let task = Agenda::::get(when)[index as usize].clone().unwrap(); let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get()); - - #[block] - { - Pallet::::schedule_retry( - &mut weight_counter, - when, - when, - index, - &task, - retry_config, - ); - } - + }: { + Scheduler::::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config); + } verify { when = when + BlockNumberFor::::one(); assert_eq!( Retries::::get((when, 0)), Some(RetryConfig { total_retries: 10, remaining: 9, period }) ); - - Ok(()) } - #[benchmark] - fn set_retry() -> Result<(), BenchmarkError> { + set_retry { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -427,10 +348,8 @@ mod benchmarks { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - - #[extrinsic_call] - _(RawOrigin::Root, (when, index), 10, period); - + }: _(RawOrigin::Root, (when, index), 10, period) + verify { assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -438,12 +357,9 @@ mod benchmarks { assert_last_event::( Event::RetrySet { task: address, id: None, period, retries: 10 }.into(), ); - - Ok(()) } - #[benchmark] - fn set_retry_named() -> Result<(), BenchmarkError> { + set_retry_named { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -452,10 +368,8 @@ mod benchmarks { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = 
BlockNumberFor::::one(); - - #[extrinsic_call] - _(RawOrigin::Root, name, 10, period); - + }: _(RawOrigin::Root, name, 10, period) + verify { assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -463,12 +377,9 @@ mod benchmarks { assert_last_event::( Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(), ); - - Ok(()) } - #[benchmark] - fn cancel_retry() -> Result<(), BenchmarkError> { + cancel_retry { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -477,19 +388,16 @@ mod benchmarks { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - assert!(Pallet::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); - - #[extrinsic_call] - _(RawOrigin::Root, (when, index)); - + assert!(Scheduler::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); + }: _(RawOrigin::Root, (when, index)) + verify { assert!(!Retries::::contains_key((when, index))); - assert_last_event::(Event::RetryCancelled { task: address, id: None }.into()); - - Ok(()) + assert_last_event::( + Event::RetryCancelled { task: address, id: None }.into(), + ); } - #[benchmark] - fn cancel_retry_named() -> Result<(), BenchmarkError> { + cancel_retry_named { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -498,20 +406,14 @@ mod benchmarks { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - assert!(Pallet::::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok()); - - #[extrinsic_call] - _(RawOrigin::Root, name); - + assert!(Scheduler::::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok()); + }: _(RawOrigin::Root, name) + verify { assert!(!Retries::::contains_key((when, index))); - assert_last_event::(Event::RetryCancelled { task: address, id: Some(name) }.into()); - - Ok(()) + assert_last_event::( + 
Event::RetryCancelled { task: address, id: Some(name) }.into(), + ); } - impl_benchmark_test_suite! { - Pallet, - mock::new_test_ext(), - mock::Test - } + impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs index dc34ae556e70..62d2fe78049d 100644 --- a/substrate/frame/scheduler/src/weights.rs +++ b/substrate/frame/scheduler/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -79,8 +79,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_735_000 picoseconds. - Weight::from_parts(3_928_000, 1489) + // Minimum execution time: 3_099_000 picoseconds. + Weight::from_parts(3_298_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,10 +91,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_944_000 picoseconds. - Weight::from_parts(4_034_000, 110487) - // Standard Error: 1_119 - .saturating_add(Weight::from_parts(468_891, 0).saturating_mul(s.into())) + // Minimum execution time: 3_558_000 picoseconds. 
+ Weight::from_parts(5_984_191, 110487) + // Standard Error: 564 + .saturating_add(Weight::from_parts(334_983, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,11 +102,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_235_000 picoseconds. - Weight::from_parts(3_423_000, 0) + // Minimum execution time: 3_389_000 picoseconds. + Weight::from_parts(3_609_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) @@ -114,14 +114,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `141 + s * (1 ±0)` - // Estimated: `4197809` - // Minimum execution time: 18_976_000 picoseconds. - Weight::from_parts(19_220_000, 4197809) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_871, 0).saturating_mul(s.into())) + // Measured: `246 + s * (1 ±0)` + // Estimated: `3711 + s * (1 ±0)` + // Minimum execution time: 18_292_000 picoseconds. 
+ Weight::from_parts(18_574_000, 3711) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_189, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -129,16 +130,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_858_000 picoseconds. - Weight::from_parts(5_041_000, 0) + // Minimum execution time: 5_216_000 picoseconds. + Weight::from_parts(5_439_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_249_000 picoseconds. - Weight::from_parts(3_377_000, 0) + // Minimum execution time: 3_383_000 picoseconds. + Weight::from_parts(3_661_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -148,16 +149,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 8_482_000 picoseconds. - Weight::from_parts(9_252_000, 3997) + // Minimum execution time: 6_692_000 picoseconds. + Weight::from_parts(7_069_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_391_000 picoseconds. - Weight::from_parts(2_591_000, 0) + // Minimum execution time: 2_165_000 picoseconds. 
+ Weight::from_parts(2_332_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -166,10 +167,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_698_000 picoseconds. - Weight::from_parts(7_346_814, 110487) - // Standard Error: 2_513 - .saturating_add(Weight::from_parts(535_729, 0).saturating_mul(s.into())) + // Minimum execution time: 10_209_000 picoseconds. + Weight::from_parts(11_235_511, 110487) + // Standard Error: 906 + .saturating_add(Weight::from_parts(375_445, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -184,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 16_371_000 picoseconds. - Weight::from_parts(9_559_789, 110487) - // Standard Error: 2_542 - .saturating_add(Weight::from_parts(723_961, 0).saturating_mul(s.into())) + // Minimum execution time: 15_906_000 picoseconds. + Weight::from_parts(13_697_344, 110487) + // Standard Error: 949 + .saturating_add(Weight::from_parts(564_461, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -200,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 13_995_000 picoseconds. - Weight::from_parts(16_677_389, 110487) - // Standard Error: 2_606 - .saturating_add(Weight::from_parts(555_434, 0).saturating_mul(s.into())) + // Minimum execution time: 13_618_000 picoseconds. 
+ Weight::from_parts(17_489_572, 110487) + // Standard Error: 766 + .saturating_add(Weight::from_parts(377_559, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -218,10 +219,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 18_962_000 picoseconds. - Weight::from_parts(17_610_180, 110487) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(743_494, 0).saturating_mul(s.into())) + // Minimum execution time: 17_954_000 picoseconds. + Weight::from_parts(18_459_344, 110487) + // Standard Error: 835 + .saturating_add(Weight::from_parts(585_557, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -234,10 +235,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `118` // Estimated: `110487` - // Minimum execution time: 10_303_000 picoseconds. - Weight::from_parts(12_180_080, 110487) - // Standard Error: 286 - .saturating_add(Weight::from_parts(16_437, 0).saturating_mul(s.into())) + // Minimum execution time: 9_446_000 picoseconds. + Weight::from_parts(10_797_672, 110487) + // Standard Error: 184 + .saturating_add(Weight::from_parts(13_971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -249,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 156_198_000 picoseconds. - Weight::from_parts(167_250_000, 110487) + // Minimum execution time: 137_044_000 picoseconds. 
+ Weight::from_parts(142_855_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -264,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 169_418_000 picoseconds. - Weight::from_parts(176_781_000, 110487) + // Minimum execution time: 144_333_000 picoseconds. + Weight::from_parts(149_251_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -277,8 +278,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 154_106_000 picoseconds. - Weight::from_parts(166_893_000, 110487) + // Minimum execution time: 132_387_000 picoseconds. + Weight::from_parts(139_222_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -292,8 +293,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 167_121_000 picoseconds. - Weight::from_parts(175_510_000, 110487) + // Minimum execution time: 141_082_000 picoseconds. + Weight::from_parts(146_117_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -307,8 +308,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_735_000 picoseconds. - Weight::from_parts(3_928_000, 1489) + // Minimum execution time: 3_099_000 picoseconds. 
+ Weight::from_parts(3_298_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -319,10 +320,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_944_000 picoseconds. - Weight::from_parts(4_034_000, 110487) - // Standard Error: 1_119 - .saturating_add(Weight::from_parts(468_891, 0).saturating_mul(s.into())) + // Minimum execution time: 3_558_000 picoseconds. + Weight::from_parts(5_984_191, 110487) + // Standard Error: 564 + .saturating_add(Weight::from_parts(334_983, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -330,11 +331,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_235_000 picoseconds. - Weight::from_parts(3_423_000, 0) + // Minimum execution time: 3_389_000 picoseconds. + Weight::from_parts(3_609_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) @@ -342,14 +343,15 @@ impl WeightInfo for () { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `141 + s * (1 ±0)` - // Estimated: `4197809` - // Minimum execution time: 18_976_000 picoseconds. 
- Weight::from_parts(19_220_000, 4197809) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_871, 0).saturating_mul(s.into())) + // Measured: `246 + s * (1 ±0)` + // Estimated: `3711 + s * (1 ±0)` + // Minimum execution time: 18_292_000 picoseconds. + Weight::from_parts(18_574_000, 3711) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_189, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -357,16 +359,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_858_000 picoseconds. - Weight::from_parts(5_041_000, 0) + // Minimum execution time: 5_216_000 picoseconds. + Weight::from_parts(5_439_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_249_000 picoseconds. - Weight::from_parts(3_377_000, 0) + // Minimum execution time: 3_383_000 picoseconds. + Weight::from_parts(3_661_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -376,16 +378,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 8_482_000 picoseconds. - Weight::from_parts(9_252_000, 3997) + // Minimum execution time: 6_692_000 picoseconds. 
+ Weight::from_parts(7_069_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_391_000 picoseconds. - Weight::from_parts(2_591_000, 0) + // Minimum execution time: 2_165_000 picoseconds. + Weight::from_parts(2_332_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -394,10 +396,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_698_000 picoseconds. - Weight::from_parts(7_346_814, 110487) - // Standard Error: 2_513 - .saturating_add(Weight::from_parts(535_729, 0).saturating_mul(s.into())) + // Minimum execution time: 10_209_000 picoseconds. + Weight::from_parts(11_235_511, 110487) + // Standard Error: 906 + .saturating_add(Weight::from_parts(375_445, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -412,10 +414,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 16_371_000 picoseconds. - Weight::from_parts(9_559_789, 110487) - // Standard Error: 2_542 - .saturating_add(Weight::from_parts(723_961, 0).saturating_mul(s.into())) + // Minimum execution time: 15_906_000 picoseconds. + Weight::from_parts(13_697_344, 110487) + // Standard Error: 949 + .saturating_add(Weight::from_parts(564_461, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -428,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 13_995_000 picoseconds. 
- Weight::from_parts(16_677_389, 110487) - // Standard Error: 2_606 - .saturating_add(Weight::from_parts(555_434, 0).saturating_mul(s.into())) + // Minimum execution time: 13_618_000 picoseconds. + Weight::from_parts(17_489_572, 110487) + // Standard Error: 766 + .saturating_add(Weight::from_parts(377_559, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -446,10 +448,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 18_962_000 picoseconds. - Weight::from_parts(17_610_180, 110487) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(743_494, 0).saturating_mul(s.into())) + // Minimum execution time: 17_954_000 picoseconds. + Weight::from_parts(18_459_344, 110487) + // Standard Error: 835 + .saturating_add(Weight::from_parts(585_557, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -462,10 +464,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `118` // Estimated: `110487` - // Minimum execution time: 10_303_000 picoseconds. - Weight::from_parts(12_180_080, 110487) - // Standard Error: 286 - .saturating_add(Weight::from_parts(16_437, 0).saturating_mul(s.into())) + // Minimum execution time: 9_446_000 picoseconds. + Weight::from_parts(10_797_672, 110487) + // Standard Error: 184 + .saturating_add(Weight::from_parts(13_971, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -477,8 +479,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 156_198_000 picoseconds. - Weight::from_parts(167_250_000, 110487) + // Minimum execution time: 137_044_000 picoseconds. 
+ Weight::from_parts(142_855_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -492,8 +494,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 169_418_000 picoseconds. - Weight::from_parts(176_781_000, 110487) + // Minimum execution time: 144_333_000 picoseconds. + Weight::from_parts(149_251_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -505,8 +507,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 154_106_000 picoseconds. - Weight::from_parts(166_893_000, 110487) + // Minimum execution time: 132_387_000 picoseconds. + Weight::from_parts(139_222_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -520,8 +522,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 167_121_000 picoseconds. - Weight::from_parts(175_510_000, 110487) + // Minimum execution time: 141_082_000 picoseconds. 
+ Weight::from_parts(146_117_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 227868fa2a4f..d945ef42a47b 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 737678bea8a3..b82112681e67 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -17,19 +17,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true } -pallet-timestamp = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } -sp-state-machine = { workspace = true } sp-trie = { optional = true, workspace = true } +sp-state-machine = { workspace = true } [features] default = ["historical", "std"] diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index 72e4b3deabfd..264bc10a33f6 
100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -17,22 +17,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +rand = { features = ["std_rng"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-session = { workspace = true } pallet-staking = { workspace = true } -rand = { features = ["std_rng"], workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } [dev-dependencies] codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } -scale-info = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 9789b6bb593d..9ba47b34ed7a 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -22,7 +22,7 @@ use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; use codec::Decode; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::benchmarks; use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; @@ -45,12 +45,8 @@ impl OnInitialize> for Pallet { } } -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn 
set_keys() -> Result<(), BenchmarkError> { +benchmarks! { + set_keys { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -62,19 +58,13 @@ mod benchmarks { let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0, 1, 2, 3]; + let proof: Vec = vec![0,1,2,3]; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); + }: _(RawOrigin::Signed(v_controller), keys, proof) - #[extrinsic_call] - _(RawOrigin::Signed(v_controller), keys, proof); - - Ok(()) - } - - #[benchmark] - fn purge_keys() -> Result<(), BenchmarkError> { + purge_keys { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -85,33 +75,30 @@ mod benchmarks { )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0, 1, 2, 3]; + let proof: Vec = vec![0,1,2,3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); + }: _(RawOrigin::Signed(v_controller)) - #[extrinsic_call] - _(RawOrigin::Signed(v_controller)); + #[extra] + check_membership_proof_current_session { + let n in 2 .. 
MAX_VALIDATORS as u32; - Ok(()) - } - - #[benchmark(extra)] - fn check_membership_proof_current_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); let key_owner_proof2 = key_owner_proof1.clone(); - - #[block] - { - Historical::::check_proof(key, key_owner_proof1); - } - + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - #[benchmark(extra)] - fn check_membership_proof_historical_session(n: Linear<2, MAX_VALIDATORS>) { + #[extra] + check_membership_proof_historical_session { + let n in 2 .. MAX_VALIDATORS as u32; + let (key, key_owner_proof1) = check_membership_proof_setup::(n); // skip to the next session so that the session is historical @@ -119,21 +106,14 @@ mod benchmarks { Session::::rotate_session(); let key_owner_proof2 = key_owner_proof1.clone(); - - #[block] - { - Historical::::check_proof(key, key_owner_proof1); - } - + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Test, - extra = false - ); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); } /// Sets up the benchmark for checking a membership proof. 
It creates the given diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 346cd04c0fa9..2aec58cceded 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; +use sp_runtime::{traits::IdentityLookup, BuildStorage}; type AccountId = u64; type Nonce = u32; @@ -42,7 +42,6 @@ frame_support::construct_runtime!( Balances: pallet_balances, Staking: pallet_staking, Session: pallet_session, - Historical: pallet_session::historical } ); @@ -80,8 +79,7 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - // corresponds to the opaque key id above - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index e8b4a355f49a..325758d54dd8 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -127,8 +127,8 @@ use frame_support::{ dispatch::DispatchResult, ensure, traits::{ - Defensive, EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, - OneSessionHandler, ValidatorRegistration, ValidatorSet, + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, + ValidatorRegistration, ValidatorSet, }, weights::Weight, Parameter, @@ -735,23 +735,6 @@ impl Pallet { }) } - /// Re-enable the validator of index `i`, returns `false` if the validator was already enabled. 
- pub fn enable_index(i: u32) -> bool { - if i >= Validators::::decode_len().defensive_unwrap_or(0) as u32 { - return false - } - - // If the validator is not disabled, return false. - DisabledValidators::::mutate(|disabled| { - if let Ok(index) = disabled.binary_search(&i) { - disabled.remove(index); - true - } else { - false - } - }) - } - /// Disable the validator identified by `c`. (If using with the staking pallet, /// this would be their *stash* account.) /// diff --git a/substrate/frame/session/src/weights.rs b/substrate/frame/session/src/weights.rs index a52db0645701..2908a7563f07 100644 --- a/substrate/frame/session/src/weights.rs +++ b/substrate/frame/session/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,10 +66,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1952` - // Estimated: `17792` - // Minimum execution time: 68_425_000 picoseconds. - Weight::from_parts(69_632_000, 17792) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 58_466_000 picoseconds. 
+ Weight::from_parts(59_558_000, 17759) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -81,10 +81,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1850` - // Estimated: `5315` - // Minimum execution time: 49_086_000 picoseconds. - Weight::from_parts(50_131_000, 5315) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 41_730_000 picoseconds. + Weight::from_parts(42_476_000, 5282) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -100,10 +100,10 @@ impl WeightInfo for () { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1952` - // Estimated: `17792` - // Minimum execution time: 68_425_000 picoseconds. - Weight::from_parts(69_632_000, 17792) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 58_466_000 picoseconds. + Weight::from_parts(59_558_000, 17759) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -115,10 +115,10 @@ impl WeightInfo for () { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1850` - // Estimated: `5315` - // Minimum execution time: 49_086_000 picoseconds. - Weight::from_parts(50_131_000, 5315) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 41_730_000 picoseconds. 
+ Weight::from_parts(42_476_000, 5282) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index d5860518fdda..555dee68ba01 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive"], workspace = true } log = { workspace = true } rand_chacha = { workspace = true } scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-arithmetic = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [dev-dependencies] frame-support-test = { workspace = true } diff --git a/substrate/frame/society/src/benchmarking.rs b/substrate/frame/society/src/benchmarking.rs index dc8e3cab775f..8c3d2bf32ce7 100644 --- a/substrate/frame/society/src/benchmarking.rs +++ b/substrate/frame/society/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use frame_benchmarking::v2::*; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; use alloc::vec; @@ -111,57 +111,42 @@ fn increment_round, I: 'static>() { RoundCount::::put(round_count); } -#[instance_benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn bid() -> Result<(), BenchmarkError> { - setup_society::()?; +benchmarks_instance_pallet! 
{ + bid { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 10u32.into()); - + }: _(RawOrigin::Signed(caller.clone()), 10u32.into()) + verify { let first_bid: Bid> = Bid { who: caller.clone(), kind: BidKind::Deposit(mock_balance_deposit::()), value: 10u32.into(), }; assert_eq!(Bids::::get(), vec![first_bid]); - Ok(()) } - #[benchmark] - fn unbid() -> Result<(), BenchmarkError> { - setup_society::()?; + unbid { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let mut bids = Bids::::get(); Society::::insert_bid(&mut bids, &caller, 10u32.into(), make_bid::(&caller)); Bids::::put(bids); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone())); - + }: _(RawOrigin::Signed(caller.clone())) + verify { assert_eq!(Bids::::get(), vec![]); - Ok(()) } - #[benchmark] - fn vouch() -> Result<(), BenchmarkError> { - setup_society::()?; + vouch { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); let vouched: T::AccountId = account("vouched", 0, 0); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); - let vouched_lookup: ::Source = - T::Lookup::unlookup(vouched.clone()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), vouched_lookup, 0u32.into(), 0u32.into()); - + let vouched_lookup: ::Source = T::Lookup::unlookup(vouched.clone()); + }: _(RawOrigin::Signed(caller.clone()), vouched_lookup, 0u32.into(), 0u32.into()) + verify { let bids = Bids::::get(); let vouched_bid: Bid> = Bid { who: vouched.clone(), @@ -169,328 +154,207 @@ mod benchmarks { value: 0u32.into(), }; assert_eq!(bids, vec![vouched_bid]); - Ok(()) } - #[benchmark] - fn unvouch() -> Result<(), BenchmarkError> { - 
setup_society::()?; + unvouch { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); + let vouched: T::AccountId = account("vouched", 0, 0); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let mut bids = Bids::::get(); - Society::::insert_bid( - &mut bids, - &caller, - 10u32.into(), - BidKind::Vouch(caller.clone(), 0u32.into()), - ); + Society::::insert_bid(&mut bids, &caller, 10u32.into(), BidKind::Vouch(caller.clone(), 0u32.into())); Bids::::put(bids); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone())); - + }: _(RawOrigin::Signed(caller.clone())) + verify { assert_eq!(Bids::::get(), vec![]); - Ok(()) } - #[benchmark] - fn vote() -> Result<(), BenchmarkError> { - setup_society::()?; + vote { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); let candidate = add_candidate::("candidate", Default::default(), false); - let candidate_lookup: ::Source = - T::Lookup::unlookup(candidate.clone()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), candidate_lookup, true); - + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); + }: _(RawOrigin::Signed(caller.clone()), candidate_lookup, true) + verify { let maybe_vote: Vote = >::get(candidate.clone(), caller).unwrap(); assert_eq!(maybe_vote.approve, true); - Ok(()) } - #[benchmark] - fn defender_vote() -> Result<(), BenchmarkError> { - setup_society::()?; + defender_vote { + let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); let defender: T::AccountId = account("defender", 0, 0); Defending::::put((defender, caller.clone(), Tally::default())); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), false); - + }: 
_(RawOrigin::Signed(caller.clone()), false) + verify { let round = RoundCount::::get(); let skeptic_vote: Vote = DefenderVotes::::get(round, &caller).unwrap(); assert_eq!(skeptic_vote.approve, false); - Ok(()) } - #[benchmark] - fn payout() -> Result<(), BenchmarkError> { - setup_funded_society::()?; + payout { + let founder = setup_funded_society::()?; // Payee's account already exists and is a member. let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, mock_balance_deposit::()); let _ = Society::::insert_member(&caller, 0u32.into()); // Introduce payout. Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone())); - + }: _(RawOrigin::Signed(caller.clone())) + verify { let record = Payouts::::get(caller); assert!(record.payouts.is_empty()); - Ok(()) } - #[benchmark] - fn waive_repay() -> Result<(), BenchmarkError> { - setup_funded_society::()?; + waive_repay { + let founder = setup_funded_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 0u32.into()); Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 1u32.into()); - + }: _(RawOrigin::Signed(caller.clone()), 1u32.into()) + verify { let record = Payouts::::get(caller); assert!(record.payouts.is_empty()); - Ok(()) } - #[benchmark] - fn found_society() -> Result<(), BenchmarkError> { + found_society { let founder: T::AccountId = whitelisted_caller(); let can_found = T::FounderSetOrigin::try_successful_origin().map_err(|_| "No origin")?; - let founder_lookup: ::Source = - T::Lookup::unlookup(founder.clone()); - - #[extrinsic_call] - _( - can_found as T::RuntimeOrigin, - founder_lookup, - 5, - 3, - 3, - mock_balance_deposit::(), - b"benchmarking-society".to_vec(), - ); - + let founder_lookup: ::Source = 
T::Lookup::unlookup(founder.clone()); + }: _(can_found, founder_lookup, 5, 3, 3, mock_balance_deposit::(), b"benchmarking-society".to_vec()) + verify { assert_eq!(Founder::::get(), Some(founder.clone())); - Ok(()) } - #[benchmark] - fn dissolve() -> Result<(), BenchmarkError> { + dissolve { let founder = setup_society::()?; let members_and_candidates = vec![("m1", "c1"), ("m2", "c2"), ("m3", "c3"), ("m4", "c4")]; let members_count = members_and_candidates.clone().len() as u32; for (m, c) in members_and_candidates { let member: T::AccountId = account(m, 0, 0); let _ = Society::::insert_member(&member, 100u32.into()); - let candidate = add_candidate::( - c, - Tally { approvals: 1u32.into(), rejections: 1u32.into() }, - false, - ); - let candidate_lookup: ::Source = - T::Lookup::unlookup(candidate); + let candidate = add_candidate::(c, Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate); let _ = Society::::vote(RawOrigin::Signed(member).into(), candidate_lookup, true); } // Leaving only Founder member. 
- MemberCount::::mutate(|i| i.saturating_reduce(members_count)); - - #[extrinsic_call] - _(RawOrigin::Signed(founder)); - + MemberCount::::mutate(|i| { i.saturating_reduce(members_count) }); + }: _(RawOrigin::Signed(founder)) + verify { assert_eq!(Founder::::get(), None); - Ok(()) } - #[benchmark] - fn judge_suspended_member() -> Result<(), BenchmarkError> { + judge_suspended_member { let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = - T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); let _ = Society::::insert_member(&caller, 0u32.into()); let _ = Society::::suspend_member(&caller); - - #[extrinsic_call] - _(RawOrigin::Signed(founder), caller_lookup, false); - + }: _(RawOrigin::Signed(founder), caller_lookup, false) + verify { assert_eq!(SuspendedMembers::::contains_key(&caller), false); - Ok(()) } - #[benchmark] - fn set_parameters() -> Result<(), BenchmarkError> { + set_parameters { let founder = setup_society::()?; let max_members = 10u32; let max_intake = 10u32; let max_strikes = 10u32; let candidate_deposit: BalanceOf = 10u32.into(); let params = GroupParams { max_members, max_intake, max_strikes, candidate_deposit }; - - #[extrinsic_call] - _(RawOrigin::Signed(founder), max_members, max_intake, max_strikes, candidate_deposit); - + }: _(RawOrigin::Signed(founder), max_members, max_intake, max_strikes, candidate_deposit) + verify { assert_eq!(Parameters::::get(), Some(params)); - Ok(()) } - #[benchmark] - fn punish_skeptic() -> Result<(), BenchmarkError> { - setup_society::()?; + punish_skeptic { + let founder = setup_society::()?; let candidate = add_candidate::("candidate", Default::default(), false); let skeptic: T::AccountId = account("skeptic", 0, 0); let _ = Society::::insert_member(&skeptic, 0u32.into()); Skeptic::::put(&skeptic); if let Period::Voting { more, .. 
} = Society::::period() { - frame_system::Pallet::::set_block_number( - frame_system::Pallet::::block_number() + more, - ); + frame_system::Pallet::::set_block_number(frame_system::Pallet::::block_number() + more); } - - #[extrinsic_call] - _(RawOrigin::Signed(candidate.clone())); - + }: _(RawOrigin::Signed(candidate.clone())) + verify { let candidacy = Candidates::::get(&candidate).unwrap(); assert_eq!(candidacy.skeptic_struck, true); - Ok(()) } - #[benchmark] - fn claim_membership() -> Result<(), BenchmarkError> { - setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 3u32.into(), rejections: 0u32.into() }, - false, - ); + claim_membership { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 0u32.into() }, false); increment_round::(); - - #[extrinsic_call] - _(RawOrigin::Signed(candidate.clone())); - + }: _(RawOrigin::Signed(candidate.clone())) + verify { assert!(!Candidates::::contains_key(&candidate)); assert!(Members::::contains_key(&candidate)); - Ok(()) } - #[benchmark] - fn bestow_membership() -> Result<(), BenchmarkError> { + bestow_membership { let founder = setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 3u32.into(), rejections: 1u32.into() }, - false, - ); + let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 1u32.into() }, false); increment_round::(); - - #[extrinsic_call] - _(RawOrigin::Signed(founder), candidate.clone()); - + }: _(RawOrigin::Signed(founder), candidate.clone()) + verify { assert!(!Candidates::::contains_key(&candidate)); assert!(Members::::contains_key(&candidate)); - Ok(()) } - #[benchmark] - fn kick_candidate() -> Result<(), BenchmarkError> { + kick_candidate { let founder = setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 1u32.into(), rejections: 1u32.into() }, - false, - ); + let candidate = 
add_candidate::("candidate", Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); increment_round::(); - - #[extrinsic_call] - _(RawOrigin::Signed(founder), candidate.clone()); - + }: _(RawOrigin::Signed(founder), candidate.clone()) + verify { assert!(!Candidates::::contains_key(&candidate)); - Ok(()) } - #[benchmark] - fn resign_candidacy() -> Result<(), BenchmarkError> { - setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 0u32.into(), rejections: 0u32.into() }, - false, - ); - - #[extrinsic_call] - _(RawOrigin::Signed(candidate.clone())); - + resign_candidacy { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); + }: _(RawOrigin::Signed(candidate.clone())) + verify { assert!(!Candidates::::contains_key(&candidate)); - Ok(()) } - #[benchmark] - fn drop_candidate() -> Result<(), BenchmarkError> { - setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 0u32.into(), rejections: 3u32.into() }, - false, - ); + drop_candidate { + let founder = setup_society::()?; + let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 3u32.into() }, false); let caller: T::AccountId = whitelisted_caller(); let _ = Society::::insert_member(&caller, 0u32.into()); let mut round_count = RoundCount::::get(); round_count = round_count.saturating_add(2u32); RoundCount::::put(round_count); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), candidate.clone()); - + }: _(RawOrigin::Signed(caller), candidate.clone()) + verify { assert!(!Candidates::::contains_key(&candidate)); - Ok(()) } - #[benchmark] - fn cleanup_candidacy() -> Result<(), BenchmarkError> { - setup_society::()?; - let candidate = add_candidate::( - "candidate", - Tally { approvals: 0u32.into(), rejections: 0u32.into() }, - false, - ); + cleanup_candidacy { + let founder = setup_society::()?; + let candidate = 
add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); let member_one: T::AccountId = account("one", 0, 0); let member_two: T::AccountId = account("two", 0, 0); let _ = Society::::insert_member(&member_one, 0u32.into()); let _ = Society::::insert_member(&member_two, 0u32.into()); - let candidate_lookup: ::Source = - T::Lookup::unlookup(candidate.clone()); - let _ = Society::::vote( - RawOrigin::Signed(member_one.clone()).into(), - candidate_lookup.clone(), - true, - ); - let _ = Society::::vote( - RawOrigin::Signed(member_two.clone()).into(), - candidate_lookup, - true, - ); + let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); + let _ = Society::::vote(RawOrigin::Signed(member_one.clone()).into(), candidate_lookup.clone(), true); + let _ = Society::::vote(RawOrigin::Signed(member_two.clone()).into(), candidate_lookup, true); Candidates::::remove(&candidate); - - #[extrinsic_call] - _(RawOrigin::Signed(member_one), candidate.clone(), 5); - + }: _(RawOrigin::Signed(member_one), candidate.clone(), 5) + verify { assert_eq!(Votes::::get(&candidate, &member_two), None); - Ok(()) } - #[benchmark] - fn cleanup_challenge() -> Result<(), BenchmarkError> { - setup_society::()?; + cleanup_challenge { + let founder = setup_society::()?; ChallengeRoundCount::::put(1u32); let member: T::AccountId = whitelisted_caller(); let _ = Society::::insert_member(&member, 0u32.into()); @@ -500,12 +364,9 @@ mod benchmarks { ChallengeRoundCount::::put(2u32); let mut challenge_round = ChallengeRoundCount::::get(); challenge_round = challenge_round.saturating_sub(1u32); - - #[extrinsic_call] - _(RawOrigin::Signed(member.clone()), challenge_round, 1u32); - + }: _(RawOrigin::Signed(member.clone()), challenge_round, 1u32) + verify { assert_eq!(DefenderVotes::::get(challenge_round, &defender), None); - Ok(()) } impl_benchmark_test_suite!( diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index 
b893bb6fba7d..04879cd87091 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -297,14 +297,14 @@ type NegativeImbalanceOf = <>::Currency as Currency< >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Vote { approve: bool, weight: u32, } /// A judgement by the suspension judgement origin on a suspended candidate. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum Judgement { /// The suspension judgement origin takes no direct judgment /// and places the candidate back into the bid pool. @@ -316,9 +316,7 @@ pub enum Judgement { } /// Details of a payout given as a per-block linear "trickle". -#[derive( - Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen, -)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default, TypeInfo)] pub struct Payout { /// Total value of the payout. value: Balance, @@ -331,7 +329,7 @@ pub struct Payout { } /// Status of a vouching member. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum VouchingStatus { /// Member is currently vouching for a user. Vouching, @@ -343,7 +341,7 @@ pub enum VouchingStatus { pub type StrikeCount = u32; /// A bid for entry into society. 
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Bid { /// The bidder/candidate trying to enter society who: AccountId, @@ -363,9 +361,7 @@ pub type Rank = u32; pub type VoteCount = u32; /// Tally of votes. -#[derive( - Default, Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen, -)] +#[derive(Default, Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Tally { /// The approval votes. approvals: VoteCount, @@ -392,7 +388,7 @@ impl Tally { } /// A bid for entry into society. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Candidacy { /// The index of the round where the candidacy began. round: RoundIndex, @@ -407,7 +403,7 @@ pub struct Candidacy { } /// A vote by a member on a candidate application. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum BidKind { /// The given deposit was paid for this bid. Deposit(Balance), @@ -426,7 +422,7 @@ pub type PayoutsFor = BoundedVec<(BlockNumberFor, BalanceOf), >::MaxPayouts>; /// Information concerning a member. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct MemberRecord { rank: Rank, strikes: StrikeCount, @@ -435,7 +431,7 @@ pub struct MemberRecord { } /// Information concerning a member. 
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, Default, MaxEncodedLen)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, Default)] pub struct PayoutRecord { paid: Balance, payouts: PayoutsVec, @@ -447,7 +443,7 @@ pub type PayoutRecordFor = PayoutRecord< >; /// Record for an individual new member who was elevated from a candidate recently. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct IntakeRecord { who: AccountId, bid: Balance, @@ -457,7 +453,7 @@ pub struct IntakeRecord { pub type IntakeRecordFor = IntakeRecord<::AccountId, BalanceOf>; -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct GroupParams { max_members: u32, max_intake: u32, @@ -475,6 +471,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] diff --git a/substrate/frame/society/src/weights.rs b/substrate/frame/society/src/weights.rs index f6f59d20d659..17ff0318f6a6 100644 --- a/substrate/frame/society/src/weights.rs +++ b/substrate/frame/society/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_society` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -90,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `444` // Estimated: `3909` - // Minimum execution time: 37_812_000 picoseconds. - Weight::from_parts(38_375_000, 3909) + // Minimum execution time: 31_464_000 picoseconds. + Weight::from_parts(32_533_000, 3909) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -101,8 +101,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `1946` - // Minimum execution time: 28_526_000 picoseconds. - Weight::from_parts(29_680_000, 1946) + // Minimum execution time: 24_132_000 picoseconds. + Weight::from_parts(24_936_000, 1946) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,8 +118,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `481` // Estimated: `6421` - // Minimum execution time: 28_051_000 picoseconds. - Weight::from_parts(29_088_000, 6421) + // Minimum execution time: 22_568_000 picoseconds. + Weight::from_parts(24_273_000, 6421) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -131,8 +131,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `535` // Estimated: `4000` - // Minimum execution time: 20_861_000 picoseconds. - Weight::from_parts(21_379_000, 4000) + // Minimum execution time: 15_524_000 picoseconds. + Weight::from_parts(16_324_000, 4000) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -146,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `569` // Estimated: `4034` - // Minimum execution time: 27_803_000 picoseconds. 
- Weight::from_parts(28_621_000, 4034) + // Minimum execution time: 22_360_000 picoseconds. + Weight::from_parts(23_318_000, 4034) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -163,8 +163,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561` // Estimated: `4026` - // Minimum execution time: 24_774_000 picoseconds. - Weight::from_parts(26_040_000, 4026) + // Minimum execution time: 19_457_000 picoseconds. + Weight::from_parts(20_461_000, 4026) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -176,10 +176,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `687` - // Estimated: `4152` - // Minimum execution time: 58_072_000 picoseconds. - Weight::from_parts(59_603_000, 4152) + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 52_032_000 picoseconds. + Weight::from_parts(52_912_000, 4115) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -191,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `4012` - // Minimum execution time: 24_809_000 picoseconds. - Weight::from_parts(25_927_000, 4012) + // Minimum execution time: 19_479_000 picoseconds. + Weight::from_parts(20_120_000, 4012) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -214,8 +214,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `1665` - // Minimum execution time: 15_541_000 picoseconds. - Weight::from_parts(15_950_000, 1665) + // Minimum execution time: 15_843_000 picoseconds. 
+ Weight::from_parts(16_617_000, 1665) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -255,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1654` // Estimated: `15019` - // Minimum execution time: 62_275_000 picoseconds. - Weight::from_parts(64_251_000, 15019) + // Minimum execution time: 58_302_000 picoseconds. + Weight::from_parts(59_958_000, 15019) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(30_u64)) } @@ -272,8 +272,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `505` // Estimated: `3970` - // Minimum execution time: 25_561_000 picoseconds. - Weight::from_parts(26_796_000, 3970) + // Minimum execution time: 20_044_000 picoseconds. + Weight::from_parts(20_884_000, 3970) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -287,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `1872` - // Minimum execution time: 12_183_000 picoseconds. - Weight::from_parts(12_813_000, 1872) + // Minimum execution time: 11_183_000 picoseconds. + Weight::from_parts(11_573_000, 1872) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -308,8 +308,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `636` // Estimated: `4101` - // Minimum execution time: 30_355_000 picoseconds. - Weight::from_parts(31_281_000, 4101) + // Minimum execution time: 24_149_000 picoseconds. + Weight::from_parts(25_160_000, 4101) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -333,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4097` - // Minimum execution time: 43_935_000 picoseconds. 
- Weight::from_parts(45_511_000, 4097) + // Minimum execution time: 37_992_000 picoseconds. + Weight::from_parts(39_226_000, 4097) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -360,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `650` // Estimated: `4115` - // Minimum execution time: 46_043_000 picoseconds. - Weight::from_parts(47_190_000, 4115) + // Minimum execution time: 39_383_000 picoseconds. + Weight::from_parts(40_367_000, 4115) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -377,8 +377,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `6196` - // Minimum execution time: 46_161_000 picoseconds. - Weight::from_parts(47_207_000, 6196) + // Minimum execution time: 40_060_000 picoseconds. + Weight::from_parts(40_836_000, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -392,8 +392,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `746` // Estimated: `6196` - // Minimum execution time: 43_176_000 picoseconds. - Weight::from_parts(44_714_000, 6196) + // Minimum execution time: 37_529_000 picoseconds. + Weight::from_parts(38_342_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -407,8 +407,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `758` // Estimated: `6196` - // Minimum execution time: 43_972_000 picoseconds. - Weight::from_parts(45_094_000, 6196) + // Minimum execution time: 37_992_000 picoseconds. 
+ Weight::from_parts(39_002_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -422,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 19_900_000 picoseconds. - Weight::from_parts(20_940_000, 6492) + // Minimum execution time: 17_266_000 picoseconds. + Weight::from_parts(18_255_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -435,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3975` - // Minimum execution time: 14_358_000 picoseconds. - Weight::from_parts(15_014_000, 3975) + // Minimum execution time: 11_636_000 picoseconds. + Weight::from_parts(12_122_000, 3975) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -458,8 +458,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `444` // Estimated: `3909` - // Minimum execution time: 37_812_000 picoseconds. - Weight::from_parts(38_375_000, 3909) + // Minimum execution time: 31_464_000 picoseconds. + Weight::from_parts(32_533_000, 3909) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -469,8 +469,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `461` // Estimated: `1946` - // Minimum execution time: 28_526_000 picoseconds. - Weight::from_parts(29_680_000, 1946) + // Minimum execution time: 24_132_000 picoseconds. + Weight::from_parts(24_936_000, 1946) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -486,8 +486,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `481` // Estimated: `6421` - // Minimum execution time: 28_051_000 picoseconds. 
- Weight::from_parts(29_088_000, 6421) + // Minimum execution time: 22_568_000 picoseconds. + Weight::from_parts(24_273_000, 6421) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -499,8 +499,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `535` // Estimated: `4000` - // Minimum execution time: 20_861_000 picoseconds. - Weight::from_parts(21_379_000, 4000) + // Minimum execution time: 15_524_000 picoseconds. + Weight::from_parts(16_324_000, 4000) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -514,8 +514,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `569` // Estimated: `4034` - // Minimum execution time: 27_803_000 picoseconds. - Weight::from_parts(28_621_000, 4034) + // Minimum execution time: 22_360_000 picoseconds. + Weight::from_parts(23_318_000, 4034) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -531,8 +531,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561` // Estimated: `4026` - // Minimum execution time: 24_774_000 picoseconds. - Weight::from_parts(26_040_000, 4026) + // Minimum execution time: 19_457_000 picoseconds. + Weight::from_parts(20_461_000, 4026) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -544,10 +544,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `687` - // Estimated: `4152` - // Minimum execution time: 58_072_000 picoseconds. - Weight::from_parts(59_603_000, 4152) + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 52_032_000 picoseconds. 
+ Weight::from_parts(52_912_000, 4115) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -559,8 +559,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `547` // Estimated: `4012` - // Minimum execution time: 24_809_000 picoseconds. - Weight::from_parts(25_927_000, 4012) + // Minimum execution time: 19_479_000 picoseconds. + Weight::from_parts(20_120_000, 4012) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -582,8 +582,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `1665` - // Minimum execution time: 15_541_000 picoseconds. - Weight::from_parts(15_950_000, 1665) + // Minimum execution time: 15_843_000 picoseconds. + Weight::from_parts(16_617_000, 1665) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -623,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1654` // Estimated: `15019` - // Minimum execution time: 62_275_000 picoseconds. - Weight::from_parts(64_251_000, 15019) + // Minimum execution time: 58_302_000 picoseconds. + Weight::from_parts(59_958_000, 15019) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(30_u64)) } @@ -640,8 +640,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `505` // Estimated: `3970` - // Minimum execution time: 25_561_000 picoseconds. - Weight::from_parts(26_796_000, 3970) + // Minimum execution time: 20_044_000 picoseconds. + Weight::from_parts(20_884_000, 3970) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -655,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `387` // Estimated: `1872` - // Minimum execution time: 12_183_000 picoseconds. 
- Weight::from_parts(12_813_000, 1872) + // Minimum execution time: 11_183_000 picoseconds. + Weight::from_parts(11_573_000, 1872) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -676,8 +676,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `636` // Estimated: `4101` - // Minimum execution time: 30_355_000 picoseconds. - Weight::from_parts(31_281_000, 4101) + // Minimum execution time: 24_149_000 picoseconds. + Weight::from_parts(25_160_000, 4101) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -701,8 +701,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4097` - // Minimum execution time: 43_935_000 picoseconds. - Weight::from_parts(45_511_000, 4097) + // Minimum execution time: 37_992_000 picoseconds. + Weight::from_parts(39_226_000, 4097) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -728,8 +728,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `650` // Estimated: `4115` - // Minimum execution time: 46_043_000 picoseconds. - Weight::from_parts(47_190_000, 4115) + // Minimum execution time: 39_383_000 picoseconds. + Weight::from_parts(40_367_000, 4115) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -745,8 +745,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `6196` - // Minimum execution time: 46_161_000 picoseconds. - Weight::from_parts(47_207_000, 6196) + // Minimum execution time: 40_060_000 picoseconds. 
+ Weight::from_parts(40_836_000, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -760,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `746` // Estimated: `6196` - // Minimum execution time: 43_176_000 picoseconds. - Weight::from_parts(44_714_000, 6196) + // Minimum execution time: 37_529_000 picoseconds. + Weight::from_parts(38_342_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -775,8 +775,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `758` // Estimated: `6196` - // Minimum execution time: 43_972_000 picoseconds. - Weight::from_parts(45_094_000, 6196) + // Minimum execution time: 37_992_000 picoseconds. + Weight::from_parts(39_002_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -790,8 +790,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 19_900_000 picoseconds. - Weight::from_parts(20_940_000, 6492) + // Minimum execution time: 17_266_000 picoseconds. + Weight::from_parts(18_255_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -803,8 +803,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3975` - // Minimum execution time: 14_358_000 picoseconds. - Weight::from_parts(15_014_000, 3975) + // Minimum execution time: 11_636_000 picoseconds. 
+ Weight::from_parts(12_122_000, 3975) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index b3e340cbcbff..0ca36ca8545a 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -203,18 +203,12 @@ pub mod prelude { /// Dispatch types from `frame-support`, other fundamental traits #[doc(no_inline)] pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; - pub use frame_support::traits::{ - Contains, EstimateNextSessionRotation, IsSubType, OnRuntimeUpgrade, OneSessionHandler, - }; + pub use frame_support::traits::{Contains, IsSubType, OnRuntimeUpgrade}; /// Pallet prelude of `frame-system`. #[doc(no_inline)] pub use frame_system::pallet_prelude::*; - /// Transaction related helpers to submit transactions. - #[doc(no_inline)] - pub use frame_system::offchain::*; - /// All FRAME-relevant derive macros. #[doc(no_inline)] pub use super::derive::*; @@ -222,21 +216,16 @@ pub mod prelude { /// All hashing related things pub use super::hashing::*; - /// All arithmetic types and traits used for safe math. 
- pub use super::arithmetic::*; - /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ - BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, - Saturating, StaticLookup, TrailingZeroInput, + Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, Saturating, StaticLookup, + TrailingZeroInput, }; - /// Other runtime types and traits + /// Other error/result types for runtime #[doc(no_inline)] - pub use sp_runtime::{ - BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError, - }; + pub use sp_runtime::{DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError}; } #[cfg(any(feature = "try-runtime", test))] @@ -402,7 +391,7 @@ pub mod runtime { LOCAL_TESTNET_RUNTIME_PRESET, }; pub use sp_inherents::{CheckInherentsResult, InherentData}; - pub use sp_keyring::Sr25519Keyring; + pub use sp_keyring::AccountKeyring; pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; } @@ -520,8 +509,6 @@ pub mod traits { } /// The arithmetic types used for safe math. -/// -/// This is already part of the [`prelude`]. pub mod arithmetic { pub use sp_arithmetic::{traits::*, *}; } diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md index 064a7d4a48f4..113b7a6200b6 100644 --- a/substrate/frame/staking/CHANGELOG.md +++ b/substrate/frame/staking/CHANGELOG.md @@ -7,18 +7,6 @@ on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a single integer version number for staking pallet to keep track of all storage migrations. -## [v16] - - -### Added - -- New default implementation of `DisablingStrategy` - `UpToLimitWithReEnablingDisablingStrategy`. - Same as `UpToLimitDisablingStrategy` except when a limit (1/3 default) is reached. When limit is - reached the offender is only disabled if his offence is greater or equal than some other already - disabled offender. The smallest possible offender is re-enabled to make space for the new greater - offender. 
A limit should thus always be respected. -- `DisabledValidators` changed format to include severity of the offence. - ## [v15] ### Added diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 22176b6d720b..a6a0ccd3b0a7 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -16,40 +16,40 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde = { features = ["alloc", "derive"], workspace = true } codec = { features = [ "derive", ], workspace = true } -frame-election-provider-support = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -pallet-authorship = { workspace = true } pallet-session = { features = [ "historical", ], workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -serde = { features = ["alloc", "derive"], workspace = true } +pallet-authorship = { workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } -sp-io = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } -sp-staking = { features = ["serde"], workspace = true } +frame-election-provider-support = { workspace = true } +log = { workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } rand_chacha = { optional = true, workspace = true } [dev-dependencies] -frame-benchmarking = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, 
default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -rand_chacha = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-npos-elections = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } substrate-test-utils = { workspace = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 79d8dd3fbc30..96bd3860542f 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -708,7 +708,7 @@ mod benchmarks { >::insert( current_era, validator.clone(), - Validators::::get(&validator), + >::validators(&validator), ); let caller = whitelisted_caller(); @@ -975,22 +975,20 @@ mod benchmarks { ) -> Result<(), BenchmarkError> { // number of nominator intention. let n = MaxNominators::::get(); - create_validators_with_nominators_for_era::( - v, - n, - MaxNominationsOf::::get() as usize, - false, - None, - )?; - - let targets; #[block] { - // default bounds are unbounded. - targets = >::get_npos_targets(DataProviderBounds::default()); + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; } + // default bounds are unbounded. 
+ let targets = >::get_npos_targets(DataProviderBounds::default()); assert_eq!(targets.len() as u32, v); Ok(()) diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 6361663b2b1c..19d999109d8d 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -324,7 +324,7 @@ use sp_runtime::{ Perbill, Perquintill, Rounding, RuntimeDebug, Saturating, }; use sp_staking::{ - offence::{Offence, OffenceError, OffenceSeverity, ReportOffence}, + offence::{Offence, OffenceError, ReportOffence}, EraIndex, ExposurePage, OnStakingUpdate, Page, PagedExposureMetadata, SessionIndex, StakingAccount, }; @@ -849,9 +849,6 @@ pub trait SessionInterface { /// Disable the validator at the given index, returns `false` if the validator was already /// disabled or the index is out of bounds. fn disable_validator(validator_index: u32) -> bool; - /// Re-enable a validator that was previously disabled. Returns `false` if the validator was - /// already enabled or the index is out of bounds. - fn enable_validator(validator_index: u32) -> bool; /// Get the validators from session. fn validators() -> Vec; /// Prune historical session tries up to but not including the given index. 
@@ -876,10 +873,6 @@ where >::disable_index(validator_index) } - fn enable_validator(validator_index: u32) -> bool { - >::enable_index(validator_index) - } - fn validators() -> Vec<::AccountId> { >::validators() } @@ -893,9 +886,6 @@ impl SessionInterface for () { fn disable_validator(_: u32) -> bool { true } - fn enable_validator(_: u32) -> bool { - true - } fn validators() -> Vec { Vec::new() } @@ -1006,7 +996,7 @@ impl Convert for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { - ActiveEra::::get() + >::active_era() .map(|active_era| >::eras_stakers(active_era.index, &validator)) } } @@ -1281,47 +1271,19 @@ impl BenchmarkingConfig for TestBenchmarkingConfig { /// Controls validator disabling pub trait DisablingStrategy { - /// Make a disabling decision. Returning a [`DisablingDecision`] + /// Make a disabling decision. Returns the index of the validator to disable or `None` if no new + /// validator should be disabled. fn decision( offender_stash: &T::AccountId, - offender_slash_severity: OffenceSeverity, slash_era: EraIndex, - currently_disabled: &Vec<(u32, OffenceSeverity)>, - ) -> DisablingDecision; -} - -/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing -/// `decision` -/// -/// `disable` is the index of the validator to disable, -/// `reenable` is the index of the validator to re-enable. -#[derive(Debug)] -pub struct DisablingDecision { - pub disable: Option, - pub reenable: Option, -} - -/// Calculate the disabling limit based on the number of validators and the disabling limit factor. -/// -/// This is a sensible default implementation for the disabling limit factor for most disabling -/// strategies. 
-/// -/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled -fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize { - validators_len - .saturating_sub(1) - .checked_div(disabling_limit_factor) - .unwrap_or_else(|| { - defensive!("DISABLING_LIMIT_FACTOR should not be 0"); - 0 - }) + currently_disabled: &Vec, + ) -> Option; } -/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables -/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the -/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than -/// 1/3 of the validators in the active set can be disabled in an era. -/// +/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a +/// threshold. `DISABLING_LIMIT_FACTOR` is the factor of the maximum disabled validators in the +/// active set. E.g. setting this value to `3` means no more than 1/3 of the validators in the +/// active set can be disabled in an era. /// By default a factor of 3 is used which is the byzantine threshold. 
pub struct UpToLimitDisablingStrategy; @@ -1329,7 +1291,13 @@ impl UpToLimitDisablingStrategy usize { - factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR) + validators_len + .saturating_sub(1) + .checked_div(DISABLING_LIMIT_FACTOR) + .unwrap_or_else(|| { + defensive!("DISABLING_LIMIT_FACTOR should not be 0"); + 0 + }) } } @@ -1338,10 +1306,9 @@ impl DisablingStrategy { fn decision( offender_stash: &T::AccountId, - _offender_slash_severity: OffenceSeverity, slash_era: EraIndex, - currently_disabled: &Vec<(u32, OffenceSeverity)>, - ) -> DisablingDecision { + currently_disabled: &Vec, + ) -> Option { let active_set = T::SessionInterface::validators(); // We don't disable more than the limit @@ -1351,66 +1318,9 @@ impl DisablingStrategy "Won't disable: reached disabling limit {:?}", Self::disable_limit(active_set.len()) ); - return DisablingDecision { disable: None, reenable: None } - } - - // We don't disable for offences in previous eras - if ActiveEra::::get().map(|e| e.index).unwrap_or_default() > slash_era { - log!( - debug, - "Won't disable: current_era {:?} > slash_era {:?}", - CurrentEra::::get().unwrap_or_default(), - slash_era - ); - return DisablingDecision { disable: None, reenable: None } + return None } - let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { - idx as u32 - } else { - log!(debug, "Won't disable: offender not in active set",); - return DisablingDecision { disable: None, reenable: None } - }; - - log!(debug, "Will disable {:?}", offender_idx); - - DisablingDecision { disable: Some(offender_idx), reenable: None } - } -} - -/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a -/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher -/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new -/// offender. 
-/// -/// This strategy is not based on cumulative severity of offences but only on the severity of the -/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated -/// the same as an offender committing 50% offence. -/// -/// An extension of [`UpToLimitDisablingStrategy`]. -pub struct UpToLimitWithReEnablingDisablingStrategy; - -impl - UpToLimitWithReEnablingDisablingStrategy -{ - /// Disabling limit calculated from the total number of validators in the active set. When - /// reached re-enabling logic might kick in. - pub fn disable_limit(validators_len: usize) -> usize { - factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR) - } -} - -impl DisablingStrategy - for UpToLimitWithReEnablingDisablingStrategy -{ - fn decision( - offender_stash: &T::AccountId, - offender_slash_severity: OffenceSeverity, - slash_era: EraIndex, - currently_disabled: &Vec<(u32, OffenceSeverity)>, - ) -> DisablingDecision { - let active_set = T::SessionInterface::validators(); - // We don't disable for offences in previous eras if ActiveEra::::get().map(|e| e.index).unwrap_or_default() > slash_era { log!( @@ -1419,59 +1329,18 @@ impl DisablingStrategy Pallet::::current_era().unwrap_or_default(), slash_era ); - return DisablingDecision { disable: None, reenable: None } + return None } - // We don't disable validators that are not in the active set let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { idx as u32 } else { log!(debug, "Won't disable: offender not in active set",); - return DisablingDecision { disable: None, reenable: None } + return None }; - // Check if offender is already disabled - if let Some((_, old_severity)) = - currently_disabled.iter().find(|(idx, _)| *idx == offender_idx) - { - if offender_slash_severity > *old_severity { - log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx); - return DisablingDecision 
{ disable: Some(offender_idx), reenable: None }; - } else { - log!(debug, "Offender already disabled with higher or equal severity"); - return DisablingDecision { disable: None, reenable: None }; - } - } - - // We don't disable more than the limit (but we can re-enable a smaller offender to make - // space) - if currently_disabled.len() >= Self::disable_limit(active_set.len()) { - log!( - debug, - "Reached disabling limit {:?}, checking for re-enabling", - Self::disable_limit(active_set.len()) - ); + log!(debug, "Will disable {:?}", offender_idx); - // Find the smallest offender to re-enable that is not higher than - // offender_slash_severity - if let Some((smallest_idx, _)) = currently_disabled - .iter() - .filter(|(_, severity)| *severity <= offender_slash_severity) - .min_by_key(|(_, severity)| *severity) - { - log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx); - return DisablingDecision { - disable: Some(offender_idx), - reenable: Some(*smallest_idx), - } - } else { - log!(debug, "No smaller offender found to re-enable"); - return DisablingDecision { disable: None, reenable: None } - } - } else { - // If we are not at the limit, just disable the new offender and dont re-enable anyone - log!(debug, "Will disable {:?}", offender_idx); - return DisablingDecision { disable: Some(offender_idx), reenable: None } - } + Some(offender_idx) } } diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 9dfa93c70b32..5c9cf8613213 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -60,79 +60,6 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; -/// Migrating `DisabledValidators` from `Vec` to `Vec<(u32, OffenceSeverity)>` to track offense -/// severity for re-enabling purposes. 
-pub mod v16 { - use super::*; - use sp_staking::offence::OffenceSeverity; - - pub struct VersionUncheckedMigrateV15ToV16(core::marker::PhantomData); - impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV15ToV16 { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let old_disabled_validators = v15::DisabledValidators::::get(); - Ok(old_disabled_validators.encode()) - } - - fn on_runtime_upgrade() -> Weight { - // Migrating `DisabledValidators` from `Vec` to `Vec<(u32, OffenceSeverity)>`. - // Using max severity (PerBill 100%) for the migration which effectively makes it so - // offenders before the migration will not be re-enabled this era unless there are - // other 100% offenders. - let max_offence = OffenceSeverity(Perbill::from_percent(100)); - // Inject severity - let migrated = v15::DisabledValidators::::take() - .into_iter() - .map(|v| (v, max_offence)) - .collect::>(); - - DisabledValidators::::set(migrated); - - log!(info, "v16 applied successfully."); - T::DbWeight::get().reads_writes(1, 1) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { - // Decode state to get old_disabled_validators in a format of Vec - let old_disabled_validators = - Vec::::decode(&mut state.as_slice()).expect("Failed to decode state"); - let new_disabled_validators = DisabledValidators::::get(); - - // Compare lengths - frame_support::ensure!( - old_disabled_validators.len() == new_disabled_validators.len(), - "DisabledValidators length mismatch" - ); - - // Compare contents - let new_disabled_validators = - new_disabled_validators.into_iter().map(|(v, _)| v).collect::>(); - frame_support::ensure!( - old_disabled_validators == new_disabled_validators, - "DisabledValidator ids mismatch" - ); - - // Verify severity - let max_severity = OffenceSeverity(Perbill::from_percent(100)); - let new_disabled_validators = DisabledValidators::::get(); - for (_, severity) in 
new_disabled_validators { - frame_support::ensure!(severity == max_severity, "Severity mismatch"); - } - - Ok(()) - } - } - - pub type MigrateV15ToV16 = VersionedMigration< - 15, - 16, - VersionUncheckedMigrateV15ToV16, - Pallet, - ::DbWeight, - >; -} - /// Migrating `OffendingValidators` from `Vec<(u32, bool)>` to `Vec` pub mod v15 { use super::*; @@ -140,9 +67,6 @@ pub mod v15 { // The disabling strategy used by staking pallet type DefaultDisablingStrategy = UpToLimitDisablingStrategy; - #[storage_alias] - pub(crate) type DisabledValidators = StorageValue, Vec, ValueQuery>; - pub struct VersionUncheckedMigrateV14ToV15(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { fn on_runtime_upgrade() -> Weight { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index df8cb38e8b37..4a0209fc5b08 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -258,8 +258,7 @@ impl OnStakingUpdate for EventListenerMock { } } -// Disabling threshold for `UpToLimitDisablingStrategy` and -// `UpToLimitWithReEnablingDisablingStrategy`` +// Disabling threshold for `UpToLimitDisablingStrategy` pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; #[derive_impl(crate::config_preludes::TestDefaultConfig)] @@ -285,8 +284,7 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type DisablingStrategy = - pallet_staking::UpToLimitWithReEnablingDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct WeightedNominationsQuota; @@ -570,11 +568,11 @@ impl ExtBuilder { } pub(crate) fn active_era() -> EraIndex { - pallet_staking::ActiveEra::::get().unwrap().index + Staking::active_era().unwrap().index } pub(crate) fn current_era() -> EraIndex { - pallet_staking::CurrentEra::::get().unwrap() + 
Staking::current_era().unwrap() } pub(crate) fn bond(who: AccountId, val: Balance) { @@ -665,7 +663,7 @@ pub(crate) fn start_active_era(era_index: EraIndex) { pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { let (payout, _rest) = ::EraPayout::era_payout( - pallet_staking::ErasTotalStake::::get(active_era()), + Staking::eras_total_stake(active_era()), pallet_balances::TotalIssuance::::get(), duration, ); @@ -675,7 +673,7 @@ pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { pub(crate) fn maximum_payout_for_duration(duration: u64) -> Balance { let (payout, rest) = ::EraPayout::era_payout( - pallet_staking::ErasTotalStake::::get(active_era()), + Staking::eras_total_stake(active_era()), pallet_balances::TotalIssuance::::get(), duration, ); @@ -734,11 +732,11 @@ pub(crate) fn on_offence_in_era( } } - if pallet_staking::ActiveEra::::get().unwrap().index == era { + if Staking::active_era().unwrap().index == era { let _ = Staking::on_offence( offenders, slash_fraction, - pallet_staking::ErasStartSessionIndex::::get(era).unwrap(), + Staking::eras_start_session_index(era).unwrap(), ); } else { panic!("cannot slash in era {}", era); @@ -752,7 +750,7 @@ pub(crate) fn on_offence_now( >], slash_fraction: &[Perbill], ) { - let now = pallet_staking::ActiveEra::::get().unwrap().index; + let now = Staking::active_era().unwrap().index; on_offence_in_era(offenders, slash_fraction, now) } @@ -891,10 +889,10 @@ macro_rules! 
assert_session_era { $session, ); assert_eq!( - CurrentEra::::get().unwrap(), + Staking::current_era().unwrap(), $era, "wrong current era {} != {}", - CurrentEra::::get().unwrap(), + Staking::current_era().unwrap(), $era, ); }; diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 2ae925d03643..d3423d82769d 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -193,7 +193,7 @@ impl Pallet { ) -> Result { let mut ledger = Self::ledger(Controller(controller.clone()))?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); - if let Some(current_era) = CurrentEra::::get() { + if let Some(current_era) = Self::current_era() { ledger = ledger.consolidate_unlocked(current_era) } let new_total = ledger.total; @@ -450,9 +450,9 @@ impl Pallet { session_index: SessionIndex, is_genesis: bool, ) -> Option>> { - if let Some(current_era) = CurrentEra::::get() { + if let Some(current_era) = Self::current_era() { // Initial era has been set. - let current_era_start_session_index = ErasStartSessionIndex::::get(current_era) + let current_era_start_session_index = Self::eras_start_session_index(current_era) .unwrap_or_else(|| { frame_support::print("Error: start_session_index must be set for current_era"); 0 @@ -492,12 +492,12 @@ impl Pallet { /// Start a session potentially starting an era. fn start_session(start_session: SessionIndex) { - let next_active_era = ActiveEra::::get().map(|e| e.index + 1).unwrap_or(0); + let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); // This is only `Some` when current era has already progressed to the next era, while the // active era is one behind (i.e. in the *last session of the active era*, or *first session // of the new current era*, depending on how you look at it). 
if let Some(next_active_era_start_session_index) = - ErasStartSessionIndex::::get(next_active_era) + Self::eras_start_session_index(next_active_era) { if next_active_era_start_session_index == start_session { Self::start_era(start_session); @@ -510,16 +510,16 @@ impl Pallet { } // disable all offending validators that have been disabled for the whole era - for (index, _) in >::get() { + for index in >::get() { T::SessionInterface::disable_validator(index); } } /// End a session potentially ending an era. fn end_session(session_index: SessionIndex) { - if let Some(active_era) = ActiveEra::::get() { + if let Some(active_era) = Self::active_era() { if let Some(next_active_era_start_session_index) = - ErasStartSessionIndex::::get(active_era.index + 1) + Self::eras_start_session_index(active_era.index + 1) { if next_active_era_start_session_index == session_index + 1 { Self::end_era(active_era, session_index); @@ -577,7 +577,7 @@ impl Pallet { let era_duration = (now_as_millis_u64.defensive_saturating_sub(active_era_start)) .saturated_into::(); - let staked = ErasTotalStake::::get(&active_era.index); + let staked = Self::eras_total_stake(&active_era.index); let issuance = asset::total_issuance::(); let (validator_payout, remainder) = @@ -668,7 +668,7 @@ impl Pallet { }; let exposures = Self::collect_exposures(election_result); - if (exposures.len() as u32) < MinimumValidatorCount::::get().max(1) { + if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { // Session will panic if we ever return an empty validator set, thus max(1) ^^. match CurrentEra::::get() { Some(current_era) if current_era > 0 => log!( @@ -677,7 +677,7 @@ impl Pallet { elected, minimum is {})", CurrentEra::::get().unwrap_or(0), exposures.len(), - MinimumValidatorCount::::get(), + Self::minimum_validator_count(), ), None => { // The initial era is allowed to have no exposures. @@ -729,7 +729,7 @@ impl Pallet { // Collect the pref of all winners. 
for stash in &elected_stashes { - let pref = Validators::::get(stash); + let pref = Self::validators(stash); >::insert(&new_planned_era, stash, pref); } @@ -854,7 +854,7 @@ impl Pallet { /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. pub fn reward_by_ids(validators_points: impl IntoIterator) { - if let Some(active_era) = ActiveEra::::get() { + if let Some(active_era) = Self::active_era() { >::mutate(active_era.index, |era_rewards| { for (validator, points) in validators_points.into_iter() { *era_rewards.individual.entry(validator).or_default() += points; @@ -1196,7 +1196,7 @@ impl ElectionDataProvider for Pallet { fn desired_targets() -> data_provider::Result { Self::register_weight(T::DbWeight::get().reads(1)); - Ok(ValidatorCount::::get()) + Ok(Self::validator_count()) } fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { @@ -1229,10 +1229,10 @@ impl ElectionDataProvider for Pallet { } fn next_election_prediction(now: BlockNumberFor) -> BlockNumberFor { - let current_era = CurrentEra::::get().unwrap_or(0); - let current_session = CurrentPlannedSession::::get(); + let current_era = Self::current_era().unwrap_or(0); + let current_session = Self::current_planned_session(); let current_era_start_session_index = - ErasStartSessionIndex::::get(current_era).unwrap_or(0); + Self::eras_start_session_index(current_era).unwrap_or(0); // Number of session in the current era or the maximum session per era if reached. let era_progress = current_session .saturating_sub(current_era_start_session_index) @@ -1366,7 +1366,7 @@ impl historical::SessionManager Option>)>> { >::new_session(new_index).map(|validators| { - let current_era = CurrentEra::::get() + let current_era = Self::current_era() // Must be some as a new era has been created. 
.unwrap_or(0); @@ -1384,7 +1384,7 @@ impl historical::SessionManager Option>)>> { >::new_session_genesis(new_index).map( |validators| { - let current_era = CurrentEra::::get() + let current_era = Self::current_era() // Must be some as a new era has been created. .unwrap_or(0); @@ -1449,7 +1449,7 @@ where }; let active_era = { - let active_era = ActiveEra::::get(); + let active_era = Self::active_era(); add_db_reads_writes(1, 0); if active_era.is_none() { // This offence need not be re-submitted. @@ -1457,7 +1457,7 @@ where } active_era.expect("value checked not to be `None`; qed").index }; - let active_era_start_session_index = ErasStartSessionIndex::::get(active_era) + let active_era_start_session_index = Self::eras_start_session_index(active_era) .unwrap_or_else(|| { frame_support::print("Error: start_session_index must be set for current_era"); 0 @@ -1486,7 +1486,7 @@ where let slash_defer_duration = T::SlashDeferDuration::get(); - let invulnerables = Invulnerables::::get(); + let invulnerables = Self::invulnerables(); add_db_reads_writes(1, 0); for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { @@ -1497,12 +1497,6 @@ where continue } - Self::deposit_event(Event::::SlashReported { - validator: stash.clone(), - fraction: *slash_fraction, - slash_era, - }); - let unapplied = slashing::compute_slash::(slashing::SlashParams { stash, slash: *slash_fraction, @@ -1513,6 +1507,12 @@ where reward_proportion, }); + Self::deposit_event(Event::::SlashReported { + validator: stash.clone(), + fraction: *slash_fraction, + slash_era, + }); + if let Some(mut unapplied) = unapplied { let nominators_len = unapplied.others.len() as u64; let reporters_len = details.reporters.len() as u64; @@ -1761,7 +1761,7 @@ impl StakingInterface for Pallet { } fn current_era() -> EraIndex { - CurrentEra::::get().unwrap_or(Zero::zero()) + Self::current_era().unwrap_or(Zero::zero()) } fn stake(who: &Self::AccountId) -> Result>, DispatchError> { @@ -1842,8 +1842,7 @@ impl 
StakingInterface for Pallet { } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = - SlashingSpans::::get(&who).map_or(0, |s| s.iter().count() as u32); + let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) } @@ -2143,7 +2142,7 @@ impl Pallet { /// * For each era exposed validator, check if the exposure total is sane (exposure.total = /// exposure.own + exposure.own). fn check_exposures() -> Result<(), TryRuntimeError> { - let era = ActiveEra::::get().unwrap().index; + let era = Self::active_era().unwrap().index; ErasStakers::::iter_prefix_values(era) .map(|expo| { ensure!( @@ -2171,7 +2170,7 @@ impl Pallet { // Sanity check for the paged exposure of the active era. let mut exposures: BTreeMap>> = BTreeMap::new(); - let era = ActiveEra::::get().unwrap().index; + let era = Self::active_era().unwrap().index; let accumulator_default = PagedExposureMetadata { total: Zero::zero(), own: Zero::zero(), @@ -2233,7 +2232,7 @@ impl Pallet { fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. - let era = ActiveEra::::get().unwrap().index; + let era = Self::active_era().unwrap().index; // cache era exposures to avoid too many db reads. 
let era_exposures = T::SessionInterface::validators() @@ -2303,10 +2302,9 @@ impl Pallet { Ok(()) } - // Sorted by index fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> { ensure!( - DisabledValidators::::get().windows(2).all(|pair| pair[0].0 <= pair[1].0), + DisabledValidators::::get().windows(2).all(|pair| pair[0] <= pair[1]), "DisabledValidators is not sorted" ); Ok(()) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index b3f8c18f704c..5210bef853b2 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -38,7 +38,6 @@ use sp_runtime::{ }; use sp_staking::{ - offence::OffenceSeverity, EraIndex, Page, SessionIndex, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -70,7 +69,7 @@ pub mod pallet { use super::*; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -352,16 +351,19 @@ pub mod pallet { /// The ideal number of active validators. #[pallet::storage] + #[pallet::getter(fn validator_count)] pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; /// Minimum number of staking participants before emergency conditions are imposed. #[pallet::storage] + #[pallet::getter(fn minimum_validator_count)] pub type MinimumValidatorCount = StorageValue<_, u32, ValueQuery>; /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're /// easy to initialize and the performance hit is minimal (we expect no more than four /// invulnerables) and restricted to testnets. #[pallet::storage] + #[pallet::getter(fn invulnerables)] #[pallet::unbounded] pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; @@ -407,6 +409,7 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. 
#[pallet::storage] + #[pallet::getter(fn validators)] pub type Validators = CountedStorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; @@ -436,6 +439,7 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] + #[pallet::getter(fn nominators)] pub type Nominators = CountedStorageMap<_, Twox64Concat, T::AccountId, Nominations>; @@ -459,6 +463,7 @@ pub mod pallet { /// This is the latest planned era, depending on how the Session pallet queues the validator /// set, it might be active or not. #[pallet::storage] + #[pallet::getter(fn current_era)] pub type CurrentEra = StorageValue<_, EraIndex>; /// The active era information, it holds index and start. @@ -466,6 +471,7 @@ pub mod pallet { /// The active era is the era being currently rewarded. Validator set of this era must be /// equal to [`SessionInterface::validators`]. #[pallet::storage] + #[pallet::getter(fn active_era)] pub type ActiveEra = StorageValue<_, ActiveEraInfo>; /// The session index at which the era start for the last [`Config::HistoryDepth`] eras. @@ -473,6 +479,7 @@ pub mod pallet { /// Note: This tracks the starting session (i.e. session index when era start being active) /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. #[pallet::storage] + #[pallet::getter(fn eras_start_session_index)] pub type ErasStartSessionIndex = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>; /// Exposure of validator at era. @@ -536,6 +543,7 @@ pub mod pallet { /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] #[pallet::unbounded] + #[pallet::getter(fn eras_stakers_clipped)] pub type ErasStakersClipped = StorageDoubleMap< _, Twox64Concat, @@ -572,6 +580,7 @@ pub mod pallet { /// /// It is removed after [`Config::HistoryDepth`] eras. 
#[pallet::storage] + #[pallet::getter(fn claimed_rewards)] #[pallet::unbounded] pub type ClaimedRewards = StorageDoubleMap< _, @@ -590,6 +599,7 @@ pub mod pallet { /// Is it removed after [`Config::HistoryDepth`] eras. // If prefs hasn't been set or has been removed then 0 commission is returned. #[pallet::storage] + #[pallet::getter(fn eras_validator_prefs)] pub type ErasValidatorPrefs = StorageDoubleMap< _, Twox64Concat, @@ -604,23 +614,27 @@ pub mod pallet { /// /// Eras that haven't finished yet or has been removed doesn't have reward. #[pallet::storage] + #[pallet::getter(fn eras_validator_reward)] pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>; /// Rewards for the last [`Config::HistoryDepth`] eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] #[pallet::unbounded] + #[pallet::getter(fn eras_reward_points)] pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; /// The total amount staked for the last [`Config::HistoryDepth`] eras. /// If total hasn't been set or has been removed then 0 stake is returned. #[pallet::storage] + #[pallet::getter(fn eras_total_stake)] pub type ErasTotalStake = StorageMap<_, Twox64Concat, EraIndex, BalanceOf, ValueQuery>; /// Mode of era forcing. #[pallet::storage] + #[pallet::getter(fn force_era)] pub type ForceEra = StorageValue<_, Forcing, ValueQuery>; /// Maximum staked rewards, i.e. the percentage of the era inflation that @@ -633,11 +647,13 @@ pub mod pallet { /// /// The rest of the slashed value is handled by the `Slash`. #[pallet::storage] + #[pallet::getter(fn slash_reward_fraction)] pub type SlashRewardFraction = StorageValue<_, Perbill, ValueQuery>; /// The amount of currency given to reporters of a slash event which was /// canceled by extraordinary circumstances (e.g. governance). 
#[pallet::storage] + #[pallet::getter(fn canceled_payout)] pub type CanceledSlashPayout = StorageValue<_, BalanceOf, ValueQuery>; /// All unapplied slashes that are queued for later. @@ -679,6 +695,7 @@ pub mod pallet { /// Slashing spans for stash accounts. #[pallet::storage] + #[pallet::getter(fn slashing_spans)] #[pallet::unbounded] pub type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; @@ -698,6 +715,7 @@ pub mod pallet { /// /// This is basically in sync with the call to [`pallet_session::SessionManager::new_session`]. #[pallet::storage] + #[pallet::getter(fn current_planned_session)] pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; /// Indices of validators that have offended in the active era. The offenders are disabled for a @@ -705,15 +723,11 @@ pub mod pallet { /// implementor of [`DisablingStrategy`] defines if a validator should be disabled which /// implicitly means that the implementor also controls the max number of disabled validators. /// - /// The vec is always kept sorted based on the u32 index so that we can find whether a given - /// validator has previously offended using binary search. - /// - /// Additionally, each disabled validator is associated with an `OffenceSeverity` which - /// represents how severe is the offence that got the validator disabled. + /// The vec is always kept sorted so that we can find whether a given validator has previously + /// offended using binary search. #[pallet::storage] #[pallet::unbounded] - pub type DisabledValidators = - StorageValue<_, Vec<(u32, OffenceSeverity)>, ValueQuery>; + pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; /// The threshold for when users can start calling `chill_other` for other validators / /// nominators. The threshold is compared to the actual number of validators / nominators @@ -854,10 +868,6 @@ pub mod pallet { ForceEra { mode: Forcing }, /// Report of a controller batch deprecation. 
ControllerBatchDeprecated { failures: u32 }, - /// Validator has been disabled. - ValidatorDisabled { stash: T::AccountId }, - /// Validator has been re-enabled. - ValidatorReenabled { stash: T::AccountId }, } #[pallet::error] @@ -940,7 +950,7 @@ pub mod pallet { fn on_finalize(_n: BlockNumberFor) { // Set the start of the first era. - if let Some(mut active_era) = ActiveEra::::get() { + if let Some(mut active_era) = Self::active_era() { if active_era.start.is_none() { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); active_era.start = Some(now_as_millis_u64); @@ -981,156 +991,6 @@ pub mod pallet { } } - impl Pallet { - /// Get the ideal number of active validators. - pub fn validator_count() -> u32 { - ValidatorCount::::get() - } - - /// Get the minimum number of staking participants before emergency conditions are imposed. - pub fn minimum_validator_count() -> u32 { - MinimumValidatorCount::::get() - } - - /// Get the validators that may never be slashed or forcibly kicked out. - pub fn invulnerables() -> Vec { - Invulnerables::::get() - } - - /// Get the preferences of a given validator. - pub fn validators(account_id: EncodeLikeAccountId) -> ValidatorPrefs - where - EncodeLikeAccountId: codec::EncodeLike, - { - Validators::::get(account_id) - } - - /// Get the nomination preferences of a given nominator. - pub fn nominators( - account_id: EncodeLikeAccountId, - ) -> Option> - where - EncodeLikeAccountId: codec::EncodeLike, - { - Nominators::::get(account_id) - } - - /// Get the current era index. - pub fn current_era() -> Option { - CurrentEra::::get() - } - - /// Get the active era information. - pub fn active_era() -> Option { - ActiveEra::::get() - } - - /// Get the session index at which the era starts for the last [`Config::HistoryDepth`] - /// eras. 
- pub fn eras_start_session_index( - era_index: EncodeLikeEraIndex, - ) -> Option - where - EncodeLikeEraIndex: codec::EncodeLike, - { - ErasStartSessionIndex::::get(era_index) - } - - /// Get the clipped exposure of a given validator at an era. - pub fn eras_stakers_clipped( - era_index: EncodeLikeEraIndex, - account_id: EncodeLikeAccountId, - ) -> Exposure> - where - EncodeLikeEraIndex: codec::EncodeLike, - EncodeLikeAccountId: codec::EncodeLike, - { - ErasStakersClipped::::get(era_index, account_id) - } - - /// Get the paged history of claimed rewards by era for given validator. - pub fn claimed_rewards( - era_index: EncodeLikeEraIndex, - account_id: EncodeLikeAccountId, - ) -> Vec - where - EncodeLikeEraIndex: codec::EncodeLike, - EncodeLikeAccountId: codec::EncodeLike, - { - ClaimedRewards::::get(era_index, account_id) - } - - /// Get the preferences of given validator at given era. - pub fn eras_validator_prefs( - era_index: EncodeLikeEraIndex, - account_id: EncodeLikeAccountId, - ) -> ValidatorPrefs - where - EncodeLikeEraIndex: codec::EncodeLike, - EncodeLikeAccountId: codec::EncodeLike, - { - ErasValidatorPrefs::::get(era_index, account_id) - } - - /// Get the total validator era payout for the last [`Config::HistoryDepth`] eras. - pub fn eras_validator_reward( - era_index: EncodeLikeEraIndex, - ) -> Option> - where - EncodeLikeEraIndex: codec::EncodeLike, - { - ErasValidatorReward::::get(era_index) - } - - /// Get the rewards for the last [`Config::HistoryDepth`] eras. - pub fn eras_reward_points( - era_index: EncodeLikeEraIndex, - ) -> EraRewardPoints - where - EncodeLikeEraIndex: codec::EncodeLike, - { - ErasRewardPoints::::get(era_index) - } - - /// Get the total amount staked for the last [`Config::HistoryDepth`] eras. - pub fn eras_total_stake(era_index: EncodeLikeEraIndex) -> BalanceOf - where - EncodeLikeEraIndex: codec::EncodeLike, - { - ErasTotalStake::::get(era_index) - } - - /// Get the mode of era forcing. 
- pub fn force_era() -> Forcing { - ForceEra::::get() - } - - /// Get the percentage of the slash that is distributed to reporters. - pub fn slash_reward_fraction() -> Perbill { - SlashRewardFraction::::get() - } - - /// Get the amount of canceled slash payout. - pub fn canceled_payout() -> BalanceOf { - CanceledSlashPayout::::get() - } - - /// Get the slashing spans for given account. - pub fn slashing_spans( - account_id: EncodeLikeAccountId, - ) -> Option - where - EncodeLikeAccountId: codec::EncodeLike, - { - SlashingSpans::::get(account_id) - } - - /// Get the last planned session scheduled by the session pallet. - pub fn current_planned_session() -> SessionIndex { - CurrentPlannedSession::::get() - } - } - #[pallet::call] impl Pallet { /// Take the origin account as a stash and lock up `value` of its balance. `controller` will @@ -1247,7 +1107,7 @@ pub mod pallet { let maybe_withdraw_weight = { if unlocking == T::MaxUnlockingChunks::get() as usize { let real_num_slashing_spans = - SlashingSpans::::get(&controller).map_or(0, |s| s.iter().count()); + Self::slashing_spans(&controller).map_or(0, |s| s.iter().count()); Some(Self::do_withdraw_unbonded(&controller, real_num_slashing_spans as u32)?) } else { None @@ -1287,7 +1147,7 @@ pub mod pallet { ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); // Note: in case there is no current era it is fine to bond one era more. - let era = CurrentEra::::get() + let era = Self::current_era() .unwrap_or(0) .defensive_saturating_add(T::BondingDuration::get()); if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { @@ -1457,7 +1317,7 @@ pub mod pallet { let nominations = Nominations { targets, // Initial nominations are considered submitted at era 0. See `Nominations` doc. 
- submitted_in: CurrentEra::::get().unwrap_or(0), + submitted_in: Self::current_era().unwrap_or(0), suppressed: false, }; diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index ae76b0707dcb..9fb782265b8b 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -65,7 +65,7 @@ use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, RuntimeDebug, }; -use sp_staking::{offence::OffenceSeverity, EraIndex, StakingInterface}; +use sp_staking::{EraIndex, StakingInterface}; /// The proportion of the slashing reward to be paid out on the first slashing detection. /// This is f_1 in the paper. @@ -321,48 +321,17 @@ fn kick_out_if_recent(params: SlashParams) { } /// Inform the [`DisablingStrategy`] implementation about the new offender and disable the list of -/// validators provided by [`decision`]. +/// validators provided by [`make_disabling_decision`]. fn add_offending_validator(params: &SlashParams) { DisabledValidators::::mutate(|disabled| { - let new_severity = OffenceSeverity(params.slash); - let decision = - T::DisablingStrategy::decision(params.stash, new_severity, params.slash_era, &disabled); - - if let Some(offender_idx) = decision.disable { - // Check if the offender is already disabled - match disabled.binary_search_by_key(&offender_idx, |(index, _)| *index) { - // Offender is already disabled, update severity if the new one is higher - Ok(index) => { - let (_, old_severity) = &mut disabled[index]; - if new_severity > *old_severity { - *old_severity = new_severity; - } - }, - Err(index) => { - // Offender is not disabled, add to `DisabledValidators` and disable it - disabled.insert(index, (offender_idx, new_severity)); - // Propagate disablement to session level - T::SessionInterface::disable_validator(offender_idx); - // Emit event that a validator got disabled - >::deposit_event(super::Event::::ValidatorDisabled { - stash: params.stash.clone(), - }); - }, - } - } - 
- if let Some(reenable_idx) = decision.reenable { - // Remove the validator from `DisabledValidators` and re-enable it. - if let Ok(index) = disabled.binary_search_by_key(&reenable_idx, |(index, _)| *index) { - disabled.remove(index); - // Propagate re-enablement to session level - T::SessionInterface::enable_validator(reenable_idx); - // Emit event that a validator got re-enabled - let reenabled_stash = - T::SessionInterface::validators()[reenable_idx as usize].clone(); - >::deposit_event(super::Event::::ValidatorReenabled { - stash: reenabled_stash, - }); + if let Some(offender) = + T::DisablingStrategy::decision(params.stash, params.slash_era, &disabled) + { + // Add the validator to `DisabledValidators` and disable it. Do nothing if it is + // already disabled. + if let Err(index) = disabled.binary_search_by_key(&offender, |index| *index) { + disabled.insert(index, offender); + T::SessionInterface::disable_validator(offender); } } }); diff --git a/substrate/frame/staking/src/testing_utils.rs b/substrate/frame/staking/src/testing_utils.rs index 81337710aa90..efd4a40f1ab4 100644 --- a/substrate/frame/staking/src/testing_utils.rs +++ b/substrate/frame/staking/src/testing_utils.rs @@ -236,5 +236,5 @@ pub fn create_validators_with_nominators_for_era( /// get the current era. 
pub fn current_era() -> EraIndex { - CurrentEra::::get().unwrap_or(0) + >::current_era().unwrap_or(0) } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 6c2335e1aac8..d1dc6c3db659 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -200,7 +200,7 @@ fn basic_setup_works() { legacy_claimed_rewards: bounded_vec![], } ); - assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( Staking::eras_stakers(active_era(), &11), @@ -220,10 +220,10 @@ fn basic_setup_works() { ); // initial total stake = 1125 + 1375 - assert_eq!(ErasTotalStake::::get(active_era()), 2500); + assert_eq!(Staking::eras_total_stake(active_era()), 2500); // The number of validators required. - assert_eq!(ValidatorCount::::get(), 2); + assert_eq!(Staking::validator_count(), 2); // Initial Era and session assert_eq!(active_era(), 0); @@ -233,7 +233,7 @@ fn basic_setup_works() { assert_eq!(asset::stakeable_balance::(&10), 1); // New era is not being forced - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -336,7 +336,7 @@ fn rewards_should_work() { assert_eq!(asset::total_balance::(&21), init_balance_21); assert_eq!(asset::total_balance::(&101), init_balance_101); assert_eq!( - ErasRewardPoints::::get(active_era()), + Staking::eras_reward_points(active_era()), EraRewardPoints { total: 50 * 3, individual: vec![(11, 100), (21, 50)].into_iter().collect(), @@ -530,8 +530,8 @@ fn less_than_needed_candidates_works() { .validator_count(4) .nominate(false) .build_and_execute(|| { - assert_eq!(ValidatorCount::::get(), 4); - assert_eq!(MinimumValidatorCount::::get(), 1); + assert_eq!(Staking::validator_count(), 4); + assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![31, 21, 11]); mock::start_active_era(1); @@ -1096,7 
+1096,7 @@ fn reward_destination_works() { ); // (era 0, page 0) is claimed - assert_eq!(ClaimedRewards::::get(0, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(0, &11), vec![0]); // Change RewardDestination to Stash >::insert(&11, RewardDestination::Stash); @@ -1127,7 +1127,7 @@ fn reward_destination_works() { ); // (era 1, page 0) is claimed - assert_eq!(ClaimedRewards::::get(1, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); // Change RewardDestination to Account >::insert(&11, RewardDestination::Account(11)); @@ -1159,7 +1159,7 @@ fn reward_destination_works() { ); // (era 2, page 0) is claimed - assert_eq!(ClaimedRewards::::get(2, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(2, &11), vec![0]); }); } @@ -1852,7 +1852,7 @@ fn reward_to_stake_works() { .set_stake(21, 2000) .try_state(false) .build_and_execute(|| { - assert_eq!(ValidatorCount::::get(), 2); + assert_eq!(Staking::validator_count(), 2); // Confirm account 10 and 20 are validators assert!(>::contains_key(&11) && >::contains_key(&21)); @@ -2281,7 +2281,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { #[test] fn new_era_elects_correct_number_of_validators() { ExtBuilder::default().nominate(true).validator_count(1).build_and_execute(|| { - assert_eq!(ValidatorCount::::get(), 1); + assert_eq!(Staking::validator_count(), 1); assert_eq!(validator_controllers().len(), 1); Session::on_initialize(System::block_number()); @@ -2431,11 +2431,11 @@ fn era_is_always_same_length() { let session_per_era = >::get(); mock::start_active_era(1); - assert_eq!(ErasStartSessionIndex::::get(current_era()).unwrap(), session_per_era); + assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era); mock::start_active_era(2); assert_eq!( - ErasStartSessionIndex::::get(current_era()).unwrap(), + Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32 ); @@ -2444,11 +2444,11 @@ fn 
era_is_always_same_length() { advance_session(); advance_session(); assert_eq!(current_era(), 3); - assert_eq!(ErasStartSessionIndex::::get(current_era()).unwrap(), session + 2); + assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2); mock::start_active_era(4); assert_eq!( - ErasStartSessionIndex::::get(current_era()).unwrap(), + Staking::eras_start_session_index(current_era()).unwrap(), session + 2u32 + session_per_era ); }); @@ -2465,7 +2465,7 @@ fn offence_doesnt_force_new_era() { &[Perbill::from_percent(5)], ); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2473,7 +2473,7 @@ fn offence_doesnt_force_new_era() { fn offence_ensures_new_era_without_clobbering() { ExtBuilder::default().build_and_execute(|| { assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); - assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + assert_eq!(Staking::force_era(), Forcing::ForceAlways); on_offence_now( &[OffenceDetails { @@ -2483,7 +2483,7 @@ fn offence_ensures_new_era_without_clobbering() { &[Perbill::from_percent(5)], ); - assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + assert_eq!(Staking::force_era(), Forcing::ForceAlways); }); } @@ -2507,7 +2507,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { &[Perbill::from_percent(0)], ); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(is_disabled(11)); mock::start_active_era(1); @@ -2557,14 +2557,14 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { &[Perbill::from_percent(0)], ); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(is_disabled(11)); mock::start_active_era(2); // the validator is not disabled in the new era Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + 
assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); @@ -2585,7 +2585,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(!is_disabled(11)); // and we are not forcing a new era - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); on_offence_in_era( &[OffenceDetails { @@ -2601,7 +2601,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(Validators::::iter().any(|(stash, _)| stash == 11)); assert!(!is_disabled(11)); // and we are still not forcing a new era - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2733,7 +2733,7 @@ fn dont_slash_if_fraction_is_zero() { // The validator hasn't been slashed. The new era is not forced. assert_eq!(asset::stakeable_balance::(&11), 1000); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2754,7 +2754,7 @@ fn only_slash_for_max_in_era() { // The validator has been slashed and has been force-chilled. assert_eq!(asset::stakeable_balance::(&11), 500); - assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert_eq!(Staking::force_era(), Forcing::NotForcing); on_offence_now( &[OffenceDetails { @@ -3033,7 +3033,7 @@ fn deferred_slashes_are_deferred() { ); // nominations are not removed regardless of the deferring. 
- assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); @@ -3078,7 +3078,7 @@ fn retroactive_deferred_slashes_two_eras_before() { mock::start_active_era(3); - assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); System::reset_events(); on_offence_in_era( @@ -3169,7 +3169,7 @@ fn staker_cannot_bail_deferred_slash() { assert_ok!(Staking::chill(RuntimeOrigin::signed(101))); assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 500)); - assert_eq!(CurrentEra::::get().unwrap(), 1); + assert_eq!(Staking::current_era().unwrap(), 1); assert_eq!(active_era(), 1); assert_eq!( @@ -3191,14 +3191,14 @@ fn staker_cannot_bail_deferred_slash() { mock::start_active_era(2); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - assert_eq!(CurrentEra::::get().unwrap(), 2); + assert_eq!(Staking::current_era().unwrap(), 2); assert_eq!(active_era(), 2); // no slash yet. 
mock::start_active_era(3); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - assert_eq!(CurrentEra::::get().unwrap(), 3); + assert_eq!(Staking::current_era().unwrap(), 3); assert_eq!(active_era(), 3); // and cannot yet unbond: @@ -3378,7 +3378,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!(asset::stakeable_balance::(&101), 2000); // 100 has approval for 11 as of now - assert!(Nominators::::get(101).unwrap().targets.contains(&11)); + assert!(Staking::nominators(101).unwrap().targets.contains(&11)); // 11 and 21 both have the support of 100 let exposure_11 = Staking::eras_stakers(active_era(), &11); @@ -3402,7 +3402,6 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid fraction: Perbill::from_percent(10), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 }, ] @@ -3444,8 +3443,8 @@ fn non_slashable_offence_disables_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); // offence with no slash associated on_offence_now( @@ -3454,7 +3453,7 @@ fn non_slashable_offence_disables_validator() { ); // it does NOT affect the nominator. - assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); // offence that slashes 25% of the bond on_offence_now( @@ -3463,7 +3462,7 @@ fn non_slashable_offence_disables_validator() { ); // it DOES NOT affect the nominator. 
- assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( staking_events_since_last_call(), @@ -3475,13 +3474,11 @@ fn non_slashable_offence_disables_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), slash_era: 1 }, - Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 250 }, Event::Slashed { staker: 101, amount: 94 } ] @@ -3504,12 +3501,11 @@ fn slashing_independent_of_disabling_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let now = ActiveEra::::get().unwrap().index; + let now = Staking::active_era().unwrap().index; - // --- Disable without a slash --- // offence with no slash associated on_offence_in_era( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], @@ -3518,20 +3514,9 @@ fn slashing_independent_of_disabling_validator() { ); // nomination remains untouched. 
- assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); - - // first validator is disabled but not slashed - assert!(is_disabled(11)); - - // --- Slash without disabling --- - // offence that slashes 50% of the bond (setup for next slash) - on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(50)], - now, - ); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond but does not disable + // offence that slashes 25% of the bond on_offence_in_era( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3539,11 +3524,7 @@ fn slashing_independent_of_disabling_validator() { ); // nomination remains untouched. - assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); - - // second validator is slashed but not disabled - assert!(!is_disabled(21)); - assert!(is_disabled(11)); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( staking_events_since_last_call(), @@ -3555,14 +3536,6 @@ fn slashing_independent_of_disabling_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(50), - slash_era: 1 - }, - Event::Slashed { staker: 11, amount: 500 }, - Event::Slashed { staker: 101, amount: 62 }, Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), @@ -3572,6 +3545,11 @@ fn slashing_independent_of_disabling_validator() { Event::Slashed { staker: 101, amount: 94 } ] ); + + // first validator is disabled but not slashed + assert!(is_disabled(11)); + // second validator is slashed but not disabled + assert!(!is_disabled(21)); }); } @@ -3585,7 +3563,7 @@ fn offence_threshold_doesnt_trigger_new_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); assert_eq!( - 
UpToLimitWithReEnablingDisablingStrategy::::disable_limit( + UpToLimitDisablingStrategy::::disable_limit( Session::validators().len() ), 1 @@ -3594,13 +3572,13 @@ fn offence_threshold_doesnt_trigger_new_era() { // we have 4 validators and an offending validator threshold of 1/3, // even if the third validator commits an offence a new era should not be forced - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &31); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); on_offence_now( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(50)], + &[Perbill::zero()], ); // 11 should be disabled because the byzantine threshold is 1 @@ -3644,8 +3622,8 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], @@ -3653,7 +3631,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { ); // nominations are not updated. 
- assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); // validator 21 gets disabled since it got slashed assert!(is_disabled(21)); @@ -3670,7 +3648,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { ); // nominations are not updated. - assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); advance_session(); @@ -3735,7 +3713,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let active_era = active_era(); // This is the latest planned era in staking, not the active era - let current_era = CurrentEra::::get().unwrap(); + let current_era = Staking::current_era().unwrap(); // Last kept is 1: assert!(current_era - HistoryDepth::get() == 1); @@ -3799,7 +3777,7 @@ fn zero_slash_keeps_nominators() { assert!(Validators::::iter().any(|(stash, _)| stash == 11)); assert!(is_disabled(11)); // and their nominations are kept. 
- assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); }); } @@ -3858,8 +3836,8 @@ fn six_session_delay() { assert_eq!(active_era(), init_active_era + 2); // That reward are correct - assert_eq!(ErasRewardPoints::::get(init_active_era).total, 1); - assert_eq!(ErasRewardPoints::::get(init_active_era + 1).total, 2); + assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); + assert_eq!(Staking::eras_reward_points(init_active_era + 1).total, 2); }); } @@ -4104,7 +4082,7 @@ fn test_multi_page_payout_stakers_by_page() { } } - assert_eq!(ClaimedRewards::::get(14, &11), vec![0, 1]); + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); let last_era = 99; let history_depth = HistoryDepth::get(); @@ -4119,7 +4097,7 @@ fn test_multi_page_payout_stakers_by_page() { // verify we clean up history as we go for era in 0..15 { - assert_eq!(ClaimedRewards::::get(era, &11), Vec::::new()); + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); } // verify only page 0 is marked as claimed @@ -4129,7 +4107,7 @@ fn test_multi_page_payout_stakers_by_page() { first_claimable_reward_era, 0 )); - assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4138,7 +4116,7 @@ fn test_multi_page_payout_stakers_by_page() { first_claimable_reward_era, 1 )); - assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); // verify only page 0 is marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4147,7 +4125,7 @@ fn test_multi_page_payout_stakers_by_page() { last_reward_era, 0 )); - assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![0]); + 
assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4156,15 +4134,15 @@ fn test_multi_page_payout_stakers_by_page() { last_reward_era, 1 )); - assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![0, 1]); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0, 1]); // Out of order claims works. assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 69, 0)); - assert_eq!(ClaimedRewards::::get(69, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(69, &11), vec![0]); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 23, 1)); - assert_eq!(ClaimedRewards::::get(23, &11), vec![1]); + assert_eq!(Staking::claimed_rewards(23, &11), vec![1]); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 42, 0)); - assert_eq!(ClaimedRewards::::get(42, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(42, &11), vec![0]); }); } @@ -4315,7 +4293,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { } } - assert_eq!(ClaimedRewards::::get(14, &11), vec![0, 1]); + assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); let last_era = 99; let history_depth = HistoryDepth::get(); @@ -4330,7 +4308,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { // verify we clean up history as we go for era in 0..15 { - assert_eq!(ClaimedRewards::::get(era, &11), Vec::::new()); + assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); } // verify only page 0 is marked as claimed @@ -4339,7 +4317,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { 11, first_claimable_reward_era )); - assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers( @@ -4347,7 +4325,7 @@ fn 
test_multi_page_payout_stakers_backward_compatible() { 11, first_claimable_reward_era, )); - assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); + assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); // change order and verify only page 1 is marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4356,12 +4334,12 @@ fn test_multi_page_payout_stakers_backward_compatible() { last_reward_era, 1 )); - assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![1]); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1]); // verify page 0 is claimed even when explicit page is not passed assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era,)); - assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![1, 0]); + assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1, 0]); // cannot claim any more pages assert_noop!( @@ -4385,10 +4363,10 @@ fn test_multi_page_payout_stakers_backward_compatible() { // Out of order claims works. 
assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2)); - assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2]); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2]); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0]); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0]); // cannot claim page 2 again assert_noop!( @@ -4397,10 +4375,10 @@ fn test_multi_page_payout_stakers_backward_compatible() { ); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0, 1]); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1]); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0, 1, 3]); + assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1, 3]); }); } @@ -7031,8 +7009,7 @@ mod staking_interface { Error::::IncorrectSlashingSpans ); - let num_slashing_spans = - SlashingSpans::::get(&11).map_or(0, |s| s.iter().count()); + let num_slashing_spans = Staking::slashing_spans(&11).map_or(0, |s| s.iter().count()); assert_ok!(Staking::withdraw_unbonded( RuntimeOrigin::signed(11), num_slashing_spans as u32 @@ -8299,14 +8276,11 @@ mod byzantine_threshold_disabling_strategy { use crate::{ tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy, }; - use sp_runtime::Perbill; - use sp_staking::{offence::OffenceSeverity, EraIndex}; + use sp_staking::EraIndex; // Common test data - the stash of the offending validator, the era of the offence and the // active set const OFFENDER_ID: ::AccountId = 7; - const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100)); - const MIN_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0)); const SLASH_ERA: EraIndex = 1; const 
ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set @@ -8318,766 +8292,48 @@ mod byzantine_threshold_disabling_strategy { pallet_session::Validators::::put(ACTIVE_SET.to_vec()); ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); - let disabling_decision = + let disable_offender = >::decision( &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + assert!(disable_offender.is_none()); }); } #[test] fn dont_disable_beyond_byzantine_threshold() { sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(1, MIN_OFFENDER_SEVERITY), (2, MAX_OFFENDER_SEVERITY)]; + let initially_disabled = vec![1, 2]; pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - let disabling_decision = + let disable_offender = >::decision( &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + assert!(disable_offender.is_none()); }); } #[test] fn disable_when_below_byzantine_threshold() { sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(1, MAX_OFFENDER_SEVERITY)]; + let initially_disabled = vec![1]; pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - let disabling_decision = + let disable_offender = >::decision( &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX)); - }); - } -} - -mod disabling_strategy_with_reenabling { - use crate::{ - tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, - UpToLimitWithReEnablingDisablingStrategy, - }; - use sp_runtime::Perbill; - use sp_staking::{offence::OffenceSeverity, EraIndex}; - - // Common test data - the stash of the offending validator, the era of the offence and 
the - // active set - const OFFENDER_ID: ::AccountId = 7; - const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100)); - const LOW_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0)); - const SLASH_ERA: EraIndex = 1; - const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; - const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set - - #[test] - fn dont_disable_for_ancient_offence() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); - }); - } - - #[test] - fn disable_when_below_byzantine_threshold() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - // Disable Offender and do not re-enable anyone - assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX)); - assert_eq!(disabling_decision.reenable, None); - }); - } - - #[test] - fn reenable_arbitrary_on_equal_severity() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); - // Disable 7 and enable 1 - 
assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); - assert_eq!(disabling_decision.reenable.unwrap(), 0); - }); - } - - #[test] - fn do_not_reenable_higher_offenders() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - LOW_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); - }); - } - - #[test] - fn reenable_lower_offenders() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(0, LOW_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); - // Disable 7 and enable 1 - assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); - assert_eq!(disabling_decision.reenable.unwrap(), 0); - }); - } - - #[test] - fn reenable_lower_offenders_unordered() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); - // Disable 7 and enable 1 - assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); - assert_eq!(disabling_decision.reenable.unwrap(), 1); - }); - } - - #[test] - fn update_severity() { - sp_io::TestExternalities::default().execute_with(|| { - 
let initially_disabled = - vec![(OFFENDER_VALIDATOR_IDX, LOW_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_none()); - // Disable 7 "again" AKA update their severity - assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); - }); - } - - #[test] - fn update_cannot_lower_severity() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = - vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - LOW_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); - }); - } - - #[test] - fn no_accidental_reenablement_on_repeated_offence() { - sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = - vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, LOW_OFFENDER_SEVERITY)]; - pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - - let disabling_decision = - >::decision( - &OFFENDER_ID, - MAX_OFFENDER_SEVERITY, - SLASH_ERA, - &initially_disabled, - ); - - assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); - }); - } -} - -#[test] -fn reenable_lower_offenders_mock() { - ExtBuilder::default() - .validator_count(7) - .set_status(41, StakerStatus::Validator) - .set_status(51, StakerStatus::Validator) - .set_status(201, StakerStatus::Validator) - .set_status(202, StakerStatus::Validator) - .build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - - let exposure_11 = 
Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); - - // offence with a low slash - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(10)], - ); - on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(20)], - ); - - // it does NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - - // both validators should be disabled - assert!(is_disabled(11)); - assert!(is_disabled(21)); - - // offence with a higher slash - on_offence_now( - &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], - &[Perbill::from_percent(50)], - ); - - // First offender is no longer disabled - assert!(!is_disabled(11)); - // Mid offender is still disabled - assert!(is_disabled(21)); - // New offender is disabled - assert!(is_disabled(31)); - - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(10), - slash_era: 1 - }, - Event::ValidatorDisabled { stash: 11 }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(20), - slash_era: 1 - }, - Event::ValidatorDisabled { stash: 21 }, - Event::Slashed { staker: 21, amount: 200 }, - Event::Slashed { staker: 101, amount: 75 }, - Event::SlashReported { - validator: 31, - fraction: Perbill::from_percent(50), - slash_era: 1 - }, - Event::ValidatorDisabled { stash: 31 }, - Event::ValidatorReenabled { stash: 11 }, - Event::Slashed { staker: 31, amount: 250 }, - ] - ); - }); -} - 
-#[test] -fn do_not_reenable_higher_offenders_mock() { - ExtBuilder::default() - .validator_count(7) - .set_status(41, StakerStatus::Validator) - .set_status(51, StakerStatus::Validator) - .set_status(201, StakerStatus::Validator) - .set_status(202, StakerStatus::Validator) - .build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); - - // offence with a major slash - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(50)], - ); - on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(50)], - ); - - // both validators should be disabled - assert!(is_disabled(11)); - assert!(is_disabled(21)); - - // offence with a minor slash - on_offence_now( - &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], - &[Perbill::from_percent(10)], - ); - - // First and second offenders are still disabled - assert!(is_disabled(11)); - assert!(is_disabled(21)); - // New offender is not disabled as limit is reached and his prio is lower - assert!(!is_disabled(31)); - - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(50), - slash_era: 1 - }, - Event::ValidatorDisabled { stash: 11 }, - Event::Slashed { staker: 11, amount: 500 }, - Event::Slashed { staker: 101, amount: 62 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(50), - slash_era: 1 - }, - Event::ValidatorDisabled { stash: 21 }, - 
Event::Slashed { staker: 21, amount: 500 }, - Event::Slashed { staker: 101, amount: 187 }, - Event::SlashReported { - validator: 31, - fraction: Perbill::from_percent(10), - slash_era: 1 - }, - Event::Slashed { staker: 31, amount: 50 }, - ] - ); - }); -} - -#[cfg(all(feature = "try-runtime", test))] -mod migration_tests { - use super::*; - use frame_support::traits::UncheckedOnRuntimeUpgrade; - use migrations::{v15, v16}; - - #[test] - fn migrate_v15_to_v16_with_try_runtime() { - ExtBuilder::default().validator_count(7).build_and_execute(|| { - // Initial setup: Create old `DisabledValidators` in the form of `Vec` - let old_disabled_validators = vec![1u32, 2u32]; - v15::DisabledValidators::::put(old_disabled_validators.clone()); - - // Run pre-upgrade checks - let pre_upgrade_result = v16::VersionUncheckedMigrateV15ToV16::::pre_upgrade(); - assert!(pre_upgrade_result.is_ok()); - let pre_upgrade_state = pre_upgrade_result.unwrap(); - - // Run the migration - v16::VersionUncheckedMigrateV15ToV16::::on_runtime_upgrade(); - - // Run post-upgrade checks - let post_upgrade_result = - v16::VersionUncheckedMigrateV15ToV16::::post_upgrade(pre_upgrade_state); - assert!(post_upgrade_result.is_ok()); - }); - } -} - -mod getters { - use crate::{ - mock::{self}, - pallet::pallet::{Invulnerables, MinimumValidatorCount, ValidatorCount}, - slashing, - tests::{Staking, Test}, - ActiveEra, ActiveEraInfo, BalanceOf, CanceledSlashPayout, ClaimedRewards, CurrentEra, - CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStakersClipped, - ErasStartSessionIndex, ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra, - Forcing, Nominations, Nominators, Perbill, SlashRewardFraction, SlashingSpans, - ValidatorPrefs, Validators, - }; - use sp_staking::{EraIndex, Exposure, IndividualExposure, Page, SessionIndex}; - - #[test] - fn get_validator_count_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let v: u32 = 12; - 
ValidatorCount::::put(v); - - // when - let result = Staking::validator_count(); - - // then - assert_eq!(result, v); - }); - } - - #[test] - fn get_minimum_validator_count_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let v: u32 = 12; - MinimumValidatorCount::::put(v); - - // when - let result = Staking::minimum_validator_count(); - - // then - assert_eq!(result, v); - }); - } - - #[test] - fn get_invulnerables_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let v: Vec = vec![1, 2, 3]; - Invulnerables::::put(v.clone()); - - // when - let result = Staking::invulnerables(); - - // then - assert_eq!(result, v); - }); - } - - #[test] - fn get_validators_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let account_id: mock::AccountId = 1; - let validator_prefs = ValidatorPrefs::default(); - - Validators::::insert(account_id, validator_prefs.clone()); - - // when - let result = Staking::validators(&account_id); - - // then - assert_eq!(result, validator_prefs); - }); - } - - #[test] - fn get_nominators_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let account_id: mock::AccountId = 1; - let nominations: Nominations = Nominations { - targets: Default::default(), - submitted_in: Default::default(), - suppressed: false, - }; - - Nominators::::insert(account_id, nominations.clone()); - - // when - let result = Staking::nominators(account_id); - - // then - assert_eq!(result, Some(nominations)); - }); - } - - #[test] - fn get_current_era_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - CurrentEra::::put(era); - - // when - let result = Staking::current_era(); - - // then - assert_eq!(result, Some(era)); - }); - } - - #[test] - fn get_active_era_returns_value_from_storage() { - 
sp_io::TestExternalities::default().execute_with(|| { - // given - let era = ActiveEraInfo { index: 2, start: None }; - ActiveEra::::put(era); - - // when - let result: Option = Staking::active_era(); - - // then - if let Some(era_info) = result { - assert_eq!(era_info.index, 2); - assert_eq!(era_info.start, None); - } else { - panic!("Expected Some(era_info), got None"); - }; - }); - } - - #[test] - fn get_eras_start_session_index_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let session_index: SessionIndex = 14; - ErasStartSessionIndex::::insert(era, session_index); - - // when - let result = Staking::eras_start_session_index(era); - - // then - assert_eq!(result, Some(session_index)); - }); - } - - #[test] - fn get_eras_stakers_clipped_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let account_id: mock::AccountId = 1; - let exposure: Exposure> = Exposure { - total: 1125, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 125 }], - }; - ErasStakersClipped::::insert(era, account_id, exposure.clone()); - - // when - let result = Staking::eras_stakers_clipped(era, &account_id); - - // then - assert_eq!(result, exposure); - }); - } - - #[test] - fn get_claimed_rewards_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let account_id: mock::AccountId = 1; - let rewards = Vec::::new(); - ClaimedRewards::::insert(era, account_id, rewards.clone()); - - // when - let result = Staking::claimed_rewards(era, &account_id); - - // then - assert_eq!(result, rewards); - }); - } - - #[test] - fn get_eras_validator_prefs_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let account_id: mock::AccountId = 1; - let validator_prefs = ValidatorPrefs::default(); - - 
ErasValidatorPrefs::::insert(era, account_id, validator_prefs.clone()); - - // when - let result = Staking::eras_validator_prefs(era, &account_id); - - // then - assert_eq!(result, validator_prefs); - }); - } - - #[test] - fn get_eras_validator_reward_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let balance_of = BalanceOf::::default(); - - ErasValidatorReward::::insert(era, balance_of); - - // when - let result = Staking::eras_validator_reward(era); - - // then - assert_eq!(result, Some(balance_of)); - }); - } - - #[test] - fn get_eras_reward_points_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let reward_points = EraRewardPoints:: { - total: 1, - individual: vec![(11, 1)].into_iter().collect(), - }; - ErasRewardPoints::::insert(era, reward_points); - - // when - let result = Staking::eras_reward_points(era); - - // then - assert_eq!(result.total, 1); - }); - } - - #[test] - fn get_eras_total_stake_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let era: EraIndex = 12; - let balance_of = BalanceOf::::default(); - - ErasTotalStake::::insert(era, balance_of); - - // when - let result = Staking::eras_total_stake(era); - - // then - assert_eq!(result, balance_of); - }); - } - - #[test] - fn get_force_era_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let forcing = Forcing::NotForcing; - ForceEra::::put(forcing); - - // when - let result = Staking::force_era(); - - // then - assert_eq!(result, forcing); - }); - } - - #[test] - fn get_slash_reward_fraction_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let perbill = Perbill::one(); - SlashRewardFraction::::put(perbill); - - // when - let result = Staking::slash_reward_fraction(); - - // then - 
assert_eq!(result, perbill); - }); - } - - #[test] - fn get_canceled_payout_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let balance_of = BalanceOf::::default(); - CanceledSlashPayout::::put(balance_of); - - // when - let result = Staking::canceled_payout(); - - // then - assert_eq!(result, balance_of); - }); - } - - #[test] - fn get_slashing_spans_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let account_id: mock::AccountId = 1; - let spans = slashing::SlashingSpans::new(2); - SlashingSpans::::insert(account_id, spans); - - // when - let result: Option = Staking::slashing_spans(&account_id); - - // then - // simple check so as not to add extra macros to slashing::SlashingSpans struct - assert!(result.is_some()); - }); - } - - #[test] - fn get_current_planned_session_returns_value_from_storage() { - sp_io::TestExternalities::default().execute_with(|| { - // given - let session_index = SessionIndex::default(); - CurrentPlannedSession::::put(session_index); - - // when - let result = Staking::current_planned_session(); - - // then - assert_eq!(result, session_index); + assert_eq!(disable_offender, Some(OFFENDER_VALIDATOR_IDX)); }); } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index 56f561679cfc..cd4e7f973ce3 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -1584,4 +1584,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } -} \ No newline at end of file +} diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 1f1f6fc5be3a..8c82bc38da97 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -16,25 +16,25 @@ targets = ["x86_64-unknown-linux-gnu"] 
[dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } +thousands = { optional = true, workspace = true } +zstd = { optional = true, workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } remote-externalities = { optional = true, workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } substrate-state-trie-migration-rpc = { optional = true, workspace = true, default-features = true } -thousands = { optional = true, workspace = true } -zstd = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } tokio = { features = ["macros"], workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 61323b70b33d..3fe5abb81031 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -249,13 +249,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. 
stopping"); - return Ok(()); + return Ok(()) } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e); + return Err(e) } } @@ -332,7 +332,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate child key."); - return Ok(()); + return Ok(()) }, }; @@ -374,7 +374,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()); + return Ok(()) }, }; @@ -669,7 +669,7 @@ pub mod pallet { // ensure that the migration witness data was correct. if real_size_upper < task.dyn_size { Self::slash(who, deposit)?; - return Ok(().into()); + return Ok(().into()) } Self::deposit_event(Event::::Migrated { @@ -957,7 +957,6 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use alloc::vec; - use frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect, Mutate}; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -971,12 +970,8 @@ mod benchmarks { stash } - #[benchmarks] - mod inner_benchmarks { - use super::*; - - #[benchmark] - fn continue_migrate() -> Result<(), BenchmarkError> { + frame_benchmarking::benchmarks! { + continue_migrate { // note that this benchmark should migrate nothing, as we only want the overhead weight // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` // function. @@ -985,151 +980,116 @@ mod benchmarks { let stash = set_balance_for_deposit::(&caller, null.item); // Allow signed migrations. 
SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); - - #[extrinsic_call] - _( - frame_system::RawOrigin::Signed(caller.clone()), - null, - 0, - StateTrieMigration::::migration_process(), - ); - + }: _(frame_system::RawOrigin::Signed(caller.clone()), null, 0, StateTrieMigration::::migration_process()) + verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash); - - Ok(()) + assert_eq!(T::Currency::balance(&caller), stash) } - #[benchmark] - fn continue_migrate_wrong_witness() -> Result<(), BenchmarkError> { + continue_migrate_wrong_witness { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let bad_witness = MigrationTask { - progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), - ..Default::default() - }; - #[block] - { - assert!(StateTrieMigration::::continue_migrate( + let bad_witness = MigrationTask { progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), ..Default::default() }; + }: { + assert!( + StateTrieMigration::::continue_migrate( frame_system::RawOrigin::Signed(caller).into(), null, 0, bad_witness, ) - .is_err()); - } - - assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - - Ok(()) + .is_err() + ) + } + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()) } - #[benchmark] - fn migrate_custom_top_success() -> Result<(), BenchmarkError> { + migrate_custom_top_success { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); - #[extrinsic_call] - migrate_custom_top( - frame_system::RawOrigin::Signed(caller.clone()), - Default::default(), - 0, - ); - + }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) + verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - 
assert_eq!(T::Currency::balance(&caller), stash); - Ok(()) + assert_eq!(T::Currency::balance(&caller), stash) } - #[benchmark] - fn migrate_custom_top_fail() -> Result<(), BenchmarkError> { + migrate_custom_top_fail { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); // for tests, we need to make sure there is _something_ in storage that is being // migrated. - sp_io::storage::set(b"foo", vec![1u8; 33].as_ref()); - #[block] - { - assert!(StateTrieMigration::::migrate_custom_top( + sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); + }: { + assert!( + StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ) - .is_ok()); - - frame_system::Pallet::::assert_last_event( - ::RuntimeEvent::from(crate::Event::Slashed { - who: caller.clone(), - amount: StateTrieMigration::::calculate_deposit_for(1u32), - }) - .into(), - ); - } + ).is_ok() + ); + frame_system::Pallet::::assert_last_event( + ::RuntimeEvent::from(crate::Event::Slashed { + who: caller.clone(), + amount: StateTrieMigration::::calculate_deposit_for(1u32), + }).into(), + ); + } + verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash); - - Ok(()) + assert!(T::Currency::balance(&caller) < stash) } - #[benchmark] - fn migrate_custom_child_success() -> Result<(), BenchmarkError> { + migrate_custom_child_success { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 0); - - #[extrinsic_call] - migrate_custom_child( - frame_system::RawOrigin::Signed(caller.clone()), - StateTrieMigration::::childify(Default::default()), - Default::default(), - 0, - ); - + }: migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()), + 
StateTrieMigration::::childify(Default::default()), + Default::default(), + 0 + ) + verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); assert_eq!(T::Currency::balance(&caller), stash); - - Ok(()) } - #[benchmark] - fn migrate_custom_child_fail() -> Result<(), BenchmarkError> { + migrate_custom_child_fail { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 1); // for tests, we need to make sure there is _something_ in storage that is being // migrated. - sp_io::default_child_storage::set(b"top", b"foo", vec![1u8; 33].as_ref()); - - #[block] - { - assert!(StateTrieMigration::::migrate_custom_child( + sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref()); + }: { + assert!( + StateTrieMigration::::migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()).into(), StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ) - .is_ok()); - } + ).is_ok() + ) + } + verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash); - Ok(()) + assert!(T::Currency::balance(&caller) < stash) } - #[benchmark] - fn process_top_key(v: Linear<1, { 4 * 1024 * 1024 }>) -> Result<(), BenchmarkError> { + process_top_key { + let v in 1 .. 
(4 * 1024 * 1024); + let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); - #[block] - { - let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &data); - let _next = sp_io::storage::next_key(KEY); - assert_eq!(data, value); - } - - Ok(()) + }: { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &data); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data, value); } impl_benchmark_test_suite!( @@ -1781,7 +1741,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight); + return (true, weight) } duration += One::one(); now += One::one(); @@ -1808,7 +1768,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break; + break } } diff --git a/substrate/frame/state-trie-migration/src/weights.rs b/substrate/frame/state-trie-migration/src/weights.rs index 478960392bca..ddc9236f7af6 100644 --- a/substrate/frame/state-trie-migration/src/weights.rs +++ b/substrate/frame/state-trie-migration/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_state_trie_migration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,15 +66,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3820` - // Minimum execution time: 19_111_000 picoseconds. - Weight::from_parts(19_611_000, 3820) + // Estimated: `3658` + // Minimum execution time: 18_293_000 picoseconds. + Weight::from_parts(18_577_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -84,53 +84,53 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_751_000 picoseconds. - Weight::from_parts(5_052_000, 1493) + // Minimum execution time: 4_240_000 picoseconds. 
+ Weight::from_parts(4_369_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3820` - // Minimum execution time: 11_907_000 picoseconds. - Weight::from_parts(12_264_000, 3820) + // Estimated: `3658` + // Minimum execution time: 11_909_000 picoseconds. + Weight::from_parts(12_453_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3820` - // Minimum execution time: 68_089_000 picoseconds. - Weight::from_parts(68_998_000, 3820) + // Estimated: `3658` + // Minimum execution time: 65_631_000 picoseconds. + Weight::from_parts(66_506_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3820` - // Minimum execution time: 12_021_000 picoseconds. 
- Weight::from_parts(12_466_000, 3820) + // Estimated: `3658` + // Minimum execution time: 12_208_000 picoseconds. + Weight::from_parts(12_690_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3820` - // Minimum execution time: 69_553_000 picoseconds. - Weight::from_parts(71_125_000, 3820) + // Estimated: `3658` + // Minimum execution time: 66_988_000 picoseconds. + Weight::from_parts(68_616_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,12 +139,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `192 + v * (1 ±0)` - // Estimated: `3657 + v * (1 ±0)` - // Minimum execution time: 5_418_000 picoseconds. - Weight::from_parts(5_526_000, 3657) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(v.into())) + // Measured: `197 + v * (1 ±0)` + // Estimated: `3662 + v * (1 ±0)` + // Minimum execution time: 5_365_000 picoseconds. 
+ Weight::from_parts(5_460_000, 3662) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_150, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -156,15 +156,15 @@ impl WeightInfo for () { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3820` - // Minimum execution time: 19_111_000 picoseconds. - Weight::from_parts(19_611_000, 3820) + // Estimated: `3658` + // Minimum execution time: 18_293_000 picoseconds. + Weight::from_parts(18_577_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -174,53 +174,53 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_751_000 picoseconds. - Weight::from_parts(5_052_000, 1493) + // Minimum execution time: 4_240_000 picoseconds. 
+ Weight::from_parts(4_369_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3820` - // Minimum execution time: 11_907_000 picoseconds. - Weight::from_parts(12_264_000, 3820) + // Estimated: `3658` + // Minimum execution time: 11_909_000 picoseconds. + Weight::from_parts(12_453_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3820` - // Minimum execution time: 68_089_000 picoseconds. - Weight::from_parts(68_998_000, 3820) + // Estimated: `3658` + // Minimum execution time: 65_631_000 picoseconds. + Weight::from_parts(66_506_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3820` - // Minimum execution time: 12_021_000 picoseconds. 
- Weight::from_parts(12_466_000, 3820) + // Estimated: `3658` + // Minimum execution time: 12_208_000 picoseconds. + Weight::from_parts(12_690_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3820` - // Minimum execution time: 69_553_000 picoseconds. - Weight::from_parts(71_125_000, 3820) + // Estimated: `3658` + // Minimum execution time: 66_988_000 picoseconds. + Weight::from_parts(68_616_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -229,12 +229,12 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `192 + v * (1 ±0)` - // Estimated: `3657 + v * (1 ±0)` - // Minimum execution time: 5_418_000 picoseconds. - Weight::from_parts(5_526_000, 3657) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(v.into())) + // Measured: `197 + v * (1 ±0)` + // Estimated: `3662 + v * (1 ±0)` + // Minimum execution time: 5_365_000 picoseconds. 
+ Weight::from_parts(5_460_000, 3662) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_150, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index b1449fa24416..e601881cd720 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +sp-statement-store = { workspace = true } sp-api = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-statement-store = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +log = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index e2096bf0668a..9b362019b29b 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index 
cf96562a30cf..ff34cc3a7003 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -110,7 +110,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok( + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok( Default::default() )) .unwrap() diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index d2669de79e54..573de45ba32d 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -18,7 +18,7 @@ use crate::{Config, Key}; use codec::{Decode, Encode}; use core::{fmt, marker::PhantomData}; -use frame_support::{dispatch::DispatchInfo, ensure, pallet_prelude::TransactionSource}; +use frame_support::{dispatch::DispatchInfo, ensure}; use scale_info::TypeInfo; use sp_runtime::{ impl_tx_ext_default, @@ -94,7 +94,6 @@ where _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> Result< ( ValidTransaction, diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index 1b3bdbaaf42c..ac5557e68a63 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -65,10 +65,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_426_000 picoseconds. - Weight::from_parts(10_822_000, 1517) + // Minimum execution time: 9_486_000 picoseconds. + Weight::from_parts(9_663_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -76,30 +76,30 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 11_218_000 picoseconds. - Weight::from_parts(11_501_000, 1517) + // Minimum execution time: 10_501_000 picoseconds. + Weight::from_parts(10_729_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 11_161_000 picoseconds. - Weight::from_parts(11_618_000, 1517) + // Minimum execution time: 10_742_000 picoseconds. + Weight::from_parts(11_003_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn remove_key() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_617_000 picoseconds. 
- Weight::from_parts(10_092_000, 1517) + // Minimum execution time: 8_837_000 picoseconds. + Weight::from_parts(9_127_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,10 +107,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn check_only_sudo_account() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 4_903_000 picoseconds. - Weight::from_parts(5_046_000, 1517) + // Minimum execution time: 3_416_000 picoseconds. + Weight::from_parts(3_645_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } } @@ -121,10 +121,10 @@ impl WeightInfo for () { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_426_000 picoseconds. - Weight::from_parts(10_822_000, 1517) + // Minimum execution time: 9_486_000 picoseconds. + Weight::from_parts(9_663_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -132,30 +132,30 @@ impl WeightInfo for () { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 11_218_000 picoseconds. - Weight::from_parts(11_501_000, 1517) + // Minimum execution time: 10_501_000 picoseconds. 
+ Weight::from_parts(10_729_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 11_161_000 picoseconds. - Weight::from_parts(11_618_000, 1517) + // Minimum execution time: 10_742_000 picoseconds. + Weight::from_parts(11_003_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn remove_key() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_617_000 picoseconds. - Weight::from_parts(10_092_000, 1517) + // Minimum execution time: 8_837_000 picoseconds. + Weight::from_parts(9_127_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -163,10 +163,10 @@ impl WeightInfo for () { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn check_only_sudo_account() -> Weight { // Proof Size summary in bytes: - // Measured: `198` + // Measured: `165` // Estimated: `1517` - // Minimum execution time: 4_903_000 picoseconds. - Weight::from_parts(5_046_000, 1517) + // Minimum execution time: 3_416_000 picoseconds. 
+ Weight::from_parts(3_645_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 1f4fdd5d46cd..d7da034b3492 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,59 +18,60 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { workspace = true } binary-merkle-tree.workspace = true -bitflags = { workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -docify = { workspace = true } -environmental = { workspace = true } -frame-metadata = { features = [ - "current", - "unstable", -], workspace = true } -frame-support-procedural = { workspace = true } -impl-trait-for-tuples = { workspace = true } -k256 = { features = ["ecdsa"], workspace = true } -log = { workspace = true } -macro_magic = { workspace = true } -paste = { workspace = true, default-features = true } scale-info = { features = [ "derive", ], workspace = true } -serde = { features = ["alloc", "derive"], workspace = true } -serde_json = { features = ["alloc"], workspace = true } -smallvec = { workspace = true, default-features = true } +frame-metadata = { features = [ + "current", +], workspace = true } sp-api = { features = [ "frame-metadata", ], workspace = true } -sp-arithmetic = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = [ + "serde", +], workspace = true } +sp-tracing = { workspace = true } sp-core = { workspace = true } -sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } -sp-debug-derive = { workspace = true } -sp-genesis-builder = { workspace = true } +sp-arithmetic = { workspace = true } sp-inherents = { workspace = true } -sp-io = { workspace = true } -sp-metadata-ir = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } 
sp-staking = { workspace = true } -sp-state-machine = { optional = true, workspace = true } -sp-std = { workspace = true } -sp-tracing = { workspace = true } -sp-trie = { workspace = true } sp-weights = { workspace = true } -static_assertions = { workspace = true, default-features = true } +sp-debug-derive = { workspace = true } +sp-metadata-ir = { workspace = true } +sp-trie = { workspace = true } tt-call = { workspace = true } +macro_magic = { workspace = true } +frame-support-procedural = { workspace = true } +paste = { workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +bitflags = { workspace = true } +impl-trait-for-tuples = { workspace = true } +smallvec = { workspace = true, default-features = true } +log = { workspace = true } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } +k256 = { features = ["ecdsa"], workspace = true } +environmental = { workspace = true } +sp-genesis-builder = { workspace = true } +serde_json = { features = ["alloc"], workspace = true } +docify = { workspace = true } +static_assertions = { workspace = true, default-features = true } aquamarine = { workspace = true } [dev-dependencies] -Inflector = { workspace = true } assert_matches = { workspace = true } -frame-system = { workspace = true, default-features = true } pretty_assertions = { workspace = true } -sp-crypto-hashing = { workspace = true, default-features = true } sp-timestamp = { workspace = true } +frame-system = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +Inflector = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 624562187617..51790062b2c2 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,36 +18,36 @@ targets = ["x86_64-unknown-linux-gnu"] 
proc-macro = true [dependencies] -Inflector = { workspace = true } -cfg-expr = { workspace = true } derive-syn-parse = { workspace = true } docify = { workspace = true } -expander = { workspace = true } -frame-support-procedural-tools = { workspace = true, default-features = true } +Inflector = { workspace = true } +cfg-expr = { workspace = true } itertools = { workspace = true } -macro_magic = { features = ["proc_support"], workspace = true } -proc-macro-warning = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } -sp-crypto-hashing = { workspace = true } syn = { features = ["full", "parsing", "visit-mut"], workspace = true } +frame-support-procedural-tools = { workspace = true, default-features = true } +macro_magic = { features = ["proc_support"], workspace = true } +proc-macro-warning = { workspace = true } +expander = { workspace = true } +sp-crypto-hashing = { workspace = true } [dev-dependencies] codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -pretty_assertions = { workspace = true } regex = { workspace = true } +sp-metadata-ir = { workspace = true } scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } -sp-metadata-ir = { workspace = true } sp-runtime = { features = [ "serde", ], workspace = true } +frame-system = { workspace = true } +frame-support = { workspace = true } +pretty_assertions = { workspace = true } static_assertions = { workspace = true } [features] diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 0b3bd5168865..c12fc20bc8b8 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,13 +113,11 @@ pub fn 
expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); - use #scrate::__private::metadata_ir::InternalImplRuntimeApis; - #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, - versions: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSIONS.into_iter().map(|ref_version| *ref_version).collect(), + version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, address_ty, call_ty, signature_ty, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 087faf37252d..17042c248780 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,6 +466,7 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
+ #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -476,8 +477,6 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} - use #scrate::__private::metadata_ir::InternalImplRuntimeApis; - #outer_event #outer_error diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index 1397b7266a18..52f57cd2cd8b 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -21,7 +21,7 @@ use crate::{ }; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, spanned::Spanned, token, Error}; +use syn::{punctuated::Punctuated, token, Error}; impl Pallet { pub fn try_from( @@ -78,18 +78,7 @@ impl Pallet { }) .collect(); - let cfg_pattern = item - .attrs - .iter() - .filter(|attr| attr.path().segments.first().map_or(false, |s| s.ident == "cfg")) - .map(|attr| { - attr.parse_args_with(|input: syn::parse::ParseStream| { - let input = input.parse::()?; - cfg_expr::Expression::parse(&input.to_string()) - .map_err(|e| syn::Error::new(attr.span(), e.to_string())) - }) - }) - .collect::>>()?; + let cfg_pattern = vec![]; let docs = get_doc_literals(&item.attrs); diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index cbb2fde9e816..e61e17e8ac75 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support-procedural-tools-derive = { workspace = true, default-features = true } proc-macro-crate = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "full", "visit"], workspace = true } 
+frame-support-procedural-tools-derive = { workspace = true, default-features = true } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 483a3dce77f6..3678f958980a 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -1403,7 +1403,7 @@ mod extension_weight_tests { let mut info = call.get_dispatch_info(); assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); info.extension_weight = ext.weight(&call); - let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.dispatch(Some(0).into()); let mut post_info = res.unwrap(); assert!(post_info.actual_weight.is_none()); @@ -1430,7 +1430,7 @@ mod extension_weight_tests { assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); info.extension_weight = ext.weight(&call); let post_info = - ext.dispatch_transaction(Some(0).into(), call, &info, 0, 0).unwrap().unwrap(); + ext.dispatch_transaction(Some(0).into(), call, &info, 0).unwrap().unwrap(); // 1000 call weight + 50 + 200 + 0 assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1250, 0))); }); @@ -1449,7 +1449,7 @@ mod extension_weight_tests { assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); info.extension_weight = ext.weight(&call); assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); - let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.clone().dispatch(Some(0).into()); let mut post_info = res.unwrap(); // 500 actual call weight @@ -1469,7 +1469,7 @@ mod extension_weight_tests { // Second testcase let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1100), ActualWeightIs(200)); - let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let (pre, _) 
= ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.clone().dispatch(Some(0).into()); let mut post_info = res.unwrap(); // 500 actual call weight @@ -1489,7 +1489,7 @@ mod extension_weight_tests { // Third testcase let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(1060), ActualWeightIs(200)); - let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.clone().dispatch(Some(0).into()); let mut post_info = res.unwrap(); // 500 actual call weight @@ -1509,7 +1509,7 @@ mod extension_weight_tests { // Fourth testcase let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(100), ActualWeightIs(300)); - let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.clone().dispatch(Some(0).into()); let mut post_info = res.unwrap(); // 500 actual call weight diff --git a/substrate/frame/support/src/generate_genesis_config.rs b/substrate/frame/support/src/generate_genesis_config.rs index 283840d70c7c..fc21e76c7427 100644 --- a/substrate/frame/support/src/generate_genesis_config.rs +++ b/substrate/frame/support/src/generate_genesis_config.rs @@ -23,44 +23,41 @@ use alloc::{borrow::Cow, format, string::String}; /// Represents the initialization method of a field within a struct. /// -/// This enum provides information about how it was initialized. +/// This enum provides information about how it was initialized and the field name (as a `String`). /// /// Intended to be used in `build_struct_json_patch` macro. #[derive(Debug)] -pub enum InitilizationType { +pub enum InitializedField<'a> { /// The field was partially initialized (e.g., specific fields within the struct were set /// manually). 
- Partial, - /// The field was fully initialized (e.g., using `new()` or `default()` like methods - Full, + Partial(Cow<'a, str>), + /// The field was fully initialized (e.g., using `new()` or `default()` like methods). + Full(Cow<'a, str>), } -/// This struct provides information about how the struct field was initialized and the field name -/// (as a `&str`). -/// -/// Intended to be used in `build_struct_json_patch` macro. -#[derive(Debug)] -pub struct InitializedField<'a>(InitilizationType, Cow<'a, str>); - impl<'a> InitializedField<'a> { /// Returns a name of the field. pub fn get_name(&'a self) -> &'a str { - &self.1 + match self { + Self::Partial(s) | Self::Full(s) => s, + } } /// Injects a prefix to the field name. pub fn add_prefix(&mut self, prefix: &str) { - self.1 = format!("{prefix}.{}", self.1).into() + match self { + Self::Partial(s) | Self::Full(s) => *s = format!("{prefix}.{s}").into(), + }; } /// Creates new partial field instiance. pub fn partial(s: &'a str) -> Self { - Self(InitilizationType::Partial, s.into()) + Self::Partial(s.into()) } /// Creates new full field instiance. 
pub fn full(s: &'a str) -> Self { - Self(InitilizationType::Full, s.into()) + Self::Full(s.into()) } } @@ -76,15 +73,9 @@ impl PartialEq for InitializedField<'_> { .map(|c| c.to_ascii_uppercase()) .eq(camel_chars.map(|c| c.to_ascii_uppercase())) } - *self.1 == *other || compare_keys(self.1.chars(), other.chars()) - } -} - -impl<'a> From<(InitilizationType, &'a str)> for InitializedField<'a> { - fn from(value: (InitilizationType, &'a str)) -> Self { - match value.0 { - InitilizationType::Full => InitializedField::full(value.1), - InitilizationType::Partial => InitializedField::partial(value.1), + match self { + InitializedField::Partial(field_name) | InitializedField::Full(field_name) => + field_name == other || compare_keys(field_name.chars(), other.chars()), } } } @@ -113,8 +104,8 @@ pub fn retain_initialized_fields( let current_key = if current_root.is_empty() { key.clone() } else { format!("{current_root}.{key}") }; match keys_to_retain.iter().find(|key| **key == current_key) { - Some(InitializedField(InitilizationType::Full, _)) => true, - Some(InitializedField(InitilizationType::Partial, _)) => { + Some(InitializedField::Full(_)) => true, + Some(InitializedField::Partial(_)) => { retain_initialized_fields(value, keys_to_retain, current_key.clone()); true }, @@ -217,154 +208,89 @@ pub fn retain_initialized_fields( #[macro_export] macro_rules! build_struct_json_patch { ( - $($struct_type:ident)::+ { $($body:tt)* } + $($struct_type:ident)::+ { $($tail:tt)* } ) => { { - let mut __keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let mut keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); #[allow(clippy::needless_update)] - let __struct_instance = $crate::build_struct_json_patch!($($struct_type)::+, __keys @ { $($body)* }).0; - let mut __json_value = - $crate::__private::serde_json::to_value(__struct_instance).expect("serialization to json should work. 
qed"); - $crate::generate_genesis_config::retain_initialized_fields(&mut __json_value, &__keys, Default::default()); - __json_value + let struct_instance = $crate::build_struct_json_patch!($($struct_type)::+, keys @ { $($tail)* }); + let mut json_value = + $crate::__private::serde_json::to_value(struct_instance).expect("serialization to json should work. qed"); + $crate::generate_genesis_config::retain_initialized_fields(&mut json_value, &keys, Default::default()); + json_value } }; - ($($struct_type:ident)::+, $all_keys:ident @ { $($body:tt)* }) => { - { - let __value = $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($body)*); - ( - $($struct_type)::+ { ..__value.0 }, - __value.1 - ) + ($($struct_type:ident)::+, $all_keys:ident @ { $($tail:tt)* }) => { + $($struct_type)::+ { + ..$crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) } }; - ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* } ) => { - ( - $($struct_type)::+ { - $key: { - let mut __inner_keys = - $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); - let __value = $crate::build_struct_json_patch!($($type)::+, __inner_keys @ { $($body)* }); - for i in __inner_keys.iter_mut() { - i.add_prefix(stringify!($key)); - }; - $all_keys.push((__value.1,stringify!($key)).into()); - $all_keys.extend(__inner_keys); - __value.0 - }, - ..Default::default() + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $keyi:ident : $value:tt } ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + $all_keys.push( + $crate::generate_genesis_config::InitializedField::full(concat!(stringify!($key), ".", stringify!($keyi))) + ); + $($type)::+ { + $keyi:$value, + ..Default::default() + } }, - $crate::generate_genesis_config::InitilizationType::Partial - ) - }; - ($($struct_type:ident)::+, $all_keys:ident @ 
$key:ident: $($type:ident)::+ { $($body:tt)* }, $($tail:tt)*) => { - { - let mut __initialization_type; - ( - $($struct_type)::+ { - $key : { - let mut __inner_keys = - $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); - let __value = $crate::build_struct_json_patch!($($type)::+, __inner_keys @ { $($body)* }); - $all_keys.push((__value.1,stringify!($key)).into()); - - for i in __inner_keys.iter_mut() { - i.add_prefix(stringify!($key)); - }; - $all_keys.extend(__inner_keys); - __value.0 - }, - .. { - let (__value, __tmp) = - $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); - __initialization_type = __tmp; - __value - } - }, - __initialization_type - ) + ..Default::default() } }; - ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr, $($tail:tt)* ) => { - { - let mut __initialization_type; - ( - $($struct_type)::+ { - $key: { - $all_keys.push($crate::generate_genesis_config::InitializedField::full( - stringify!($key)) - ); - $value - }, - .. 
{ - let (__value, __tmp) = - $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); - __initialization_type = __tmp; - __value - } - }, - __initialization_type - ) + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* } ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + let mut inner_keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let value = $crate::build_struct_json_patch!($($type)::+, inner_keys @ { $($body)* }); + for i in inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.extend(inner_keys); + value + }, + ..Default::default() } }; - ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr ) => { - ( - $($struct_type)::+ { - $key: { - $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); - $value - }, - ..Default::default() + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* }, $($tail:tt)* ) => { + $($struct_type)::+ { + $key : { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + let mut inner_keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let value = $crate::build_struct_json_patch!($($type)::+, inner_keys @ { $($body)* }); + for i in inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.extend(inner_keys); + value }, - $crate::generate_genesis_config::InitilizationType::Partial - ) - }; - // field init shorthand - ($($struct_type:ident)::+, $all_keys:ident @ $key:ident, $($tail:tt)* ) => { - { - let __update = $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); - ( - $($struct_type)::+ { - $key: { - $all_keys.push($crate::generate_genesis_config::InitializedField::full( - stringify!($key)) - ); - $key - }, - 
..__update.0 - }, - __update.1 - ) + .. $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) } }; - ($($struct_type:ident)::+, $all_keys:ident @ $key:ident ) => { - ( - $($struct_type)::+ { - $key: { - $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); - $key - }, - ..Default::default() + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr, $($tail:tt)* ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $value }, - $crate::generate_genesis_config::InitilizationType::Partial - ) + ..$crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) + } }; - // update struct - ($($struct_type:ident)::+, $all_keys:ident @ ..$update:expr ) => { - ( - $($struct_type)::+ { - ..$update + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $value }, - $crate::generate_genesis_config::InitilizationType::Full - ) + ..Default::default() + } }; + ($($struct_type:ident)::+, $all_keys:ident @ $(,)?) => { - ( - $($struct_type)::+ { - ..Default::default() - }, - $crate::generate_genesis_config::InitilizationType::Partial - ) + $($struct_type)::+ { ..Default::default() } }; } @@ -475,8 +401,11 @@ mod test { macro_rules! 
test { ($($struct:ident)::+ { $($v:tt)* }, { $($j:tt)* } ) => {{ + println!("--"); let expected = serde_json::json!({ $($j)* }); + println!("json: {}", serde_json::to_string_pretty(&expected).unwrap()); let value = build_struct_json_patch!($($struct)::+ { $($v)* }); + println!("gc: {}", serde_json::to_string_pretty(&value).unwrap()); assert_eq!(value, expected); }}; } @@ -486,7 +415,6 @@ mod test { let t = 5; const C: u32 = 5; test!(TestStruct { b: 5 }, { "b": 5 }); - test!(TestStruct { b: 5, }, { "b": 5 }); #[allow(unused_braces)] { test!(TestStruct { b: { 4 + 34 } } , { "b": 38 }); @@ -777,324 +705,6 @@ mod test { ); } - #[test] - fn test_generate_config_macro_field_init_shorthand() { - { - let x = 5; - test!(TestStruct { s: S { x } }, { "s": { "x": 5 } }); - } - { - let s = nested_mod::InsideMod { a: 34, b: 8 }; - test!( - TestStruct { - t: nested_mod::InsideMod { a: 32 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - s, - a: 32, - } - }, - { - "t" : { "a": 32 }, - "u" : { "a": 32, "s": { "a": 34, "b": 8} } - } - ); - } - { - let s = nested_mod::InsideMod { a: 34, b: 8 }; - test!( - TestStruct { - t: nested_mod::InsideMod { a: 32 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - a: 32, - s, - } - }, - { - "t" : { "a": 32 }, - "u" : { "a": 32, "s": { "a": 34, "b": 8} } - } - ); - } - } - - #[test] - fn test_generate_config_macro_struct_update() { - { - let s = S { x: 5 }; - test!(TestStruct { s: S { ..s } }, { "s": { "x": 5 } }); - } - { - mod nested { - use super::*; - pub fn function() -> S { - S { x: 5 } - } - } - test!(TestStruct { s: S { ..nested::function() } }, { "s": { "x": 5 } }); - } - { - let s = nested_mod::InsideMod { a: 34, b: 8 }; - let s1 = nested_mod::InsideMod { a: 34, b: 8 }; - test!( - TestStruct { - t: nested_mod::InsideMod { ..s1 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - s, - a: 32, - } - }, - { - "t" : { "a": 34, "b": 8 }, - "u" : { "a": 32, "s": { "a": 34, "b": 8} } - } - ); - } - { - let i3 = 
nested_mod::nested_mod2::nested_mod3::InsideMod3 { - a: 1, - b: 2, - s: nested_mod::InsideMod { a: 55, b: 88 }, - }; - test!( - TestStruct { - t: nested_mod::InsideMod { a: 32 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - a: 32, - ..i3 - } - }, - { - "t" : { "a": 32 }, - "u" : { "a": 32, "b": 2, "s": { "a": 55, "b": 88} } - } - ); - } - { - let s = nested_mod::InsideMod { a: 34, b: 8 }; - test!( - TestStruct { - t: nested_mod::InsideMod { a: 32 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - a: 32, - s: nested_mod::InsideMod { - b: 66, - ..s - } - } - }, - { - "t" : { "a": 32 }, - "u" : { "a": 32, "s": { "a": 34, "b": 66} } - } - ); - } - { - let s = nested_mod::InsideMod { a: 34, b: 8 }; - test!( - TestStruct { - t: nested_mod::InsideMod { a: 32 }, - u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { - s: nested_mod::InsideMod { - b: 66, - ..s - }, - a: 32 - } - }, - { - "t" : { "a": 32 }, - "u" : { "a": 32, "s": { "a": 34, "b": 66} } - } - ); - } - } - - #[test] - fn test_generate_config_macro_with_execution_order() { - #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] - struct X { - x: Vec, - x2: Vec, - y2: Y, - } - #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] - struct Y { - y: Vec, - } - #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] - struct Z { - a: u32, - x: X, - y: Y, - } - { - let v = vec![1, 2, 3]; - test!(Z { a: 0, x: X { x: v }, }, { - "a": 0, "x": { "x": [1,2,3] } - }); - } - { - let v = vec![1, 2, 3]; - test!(Z { a: 3, x: X { x: v.clone() }, y: Y { y: v } }, { - "a": 3, "x": { "x": [1,2,3] }, "y": { "y": [1,2,3] } - }); - } - { - let v = vec![1, 2, 3]; - test!(Z { a: 3, x: X { y2: Y { y: v.clone() }, x: v.clone() }, y: Y { y: v } }, { - "a": 3, "x": { "x": [1,2,3], "y2":{ "y":[1,2,3] } }, "y": { "y": [1,2,3] } - }); - } - { - let v = vec![1, 2, 3]; - test!(Z { a: 3, y: Y { y: v.clone() }, x: X { y2: Y { y: v.clone() }, x: v }, }, { - "a": 
3, "x": { "x": [1,2,3], "y2":{ "y":[1,2,3] } }, "y": { "y": [1,2,3] } - }); - } - { - let v = vec![1, 2, 3]; - test!( - Z { - y: Y { - y: v.clone() - }, - x: X { - y2: Y { - y: v.clone() - }, - x: v.clone(), - x2: v.clone() - }, - }, - { - "x": { - "x": [1,2,3], - "x2": [1,2,3], - "y2": { - "y":[1,2,3] - } - }, - "y": { - "y": [1,2,3] - } - }); - } - { - let v = vec![1, 2, 3]; - test!( - Z { - y: Y { - y: v.clone() - }, - x: X { - y2: Y { - y: v.clone() - }, - x: v - }, - }, - { - "x": { - "x": [1,2,3], - "y2": { - "y":[1,2,3] - } - }, - "y": { - "y": [1,2,3] - } - }); - } - { - let mut v = vec![0, 1, 2]; - let f = |vec: &mut Vec| -> Vec { - vec.iter_mut().for_each(|x| *x += 1); - vec.clone() - }; - let z = Z { - a: 0, - y: Y { y: f(&mut v) }, - x: X { y2: Y { y: f(&mut v) }, x: f(&mut v), x2: vec![] }, - }; - let z_expected = Z { - a: 0, - y: Y { y: vec![1, 2, 3] }, - x: X { y2: Y { y: vec![2, 3, 4] }, x: vec![3, 4, 5], x2: vec![] }, - }; - assert_eq!(z, z_expected); - v = vec![0, 1, 2]; - println!("{z:?}"); - test!( - Z { - y: Y { - y: f(&mut v) - }, - x: X { - y2: Y { - y: f(&mut v) - }, - x: f(&mut v) - }, - }, - { - "y": { - "y": [1,2,3] - }, - "x": { - "y2": { - "y":[2,3,4] - }, - "x": [3,4,5], - }, - }); - } - { - let mut v = vec![0, 1, 2]; - let f = |vec: &mut Vec| -> Vec { - vec.iter_mut().for_each(|x| *x += 1); - vec.clone() - }; - let z = Z { - a: 0, - y: Y { y: f(&mut v) }, - x: X { y2: Y { y: f(&mut v) }, x: f(&mut v), x2: f(&mut v) }, - }; - let z_expected = Z { - a: 0, - y: Y { y: vec![1, 2, 3] }, - x: X { y2: Y { y: vec![2, 3, 4] }, x: vec![3, 4, 5], x2: vec![4, 5, 6] }, - }; - assert_eq!(z, z_expected); - v = vec![0, 1, 2]; - println!("{z:?}"); - test!( - Z { - y: Y { - y: f(&mut v) - }, - x: X { - y2: Y { - y: f(&mut v) - }, - x: f(&mut v), - x2: f(&mut v) - }, - }, - { - "y": { - "y": [1,2,3] - }, - "x": { - "y2": { - "y":[2,3,4] - }, - "x": [3,4,5], - "x2": [4,5,6], - }, - }); - } - } - #[test] fn test_generate_config_macro_with_nested_mods() { 
test!( @@ -1187,6 +797,7 @@ mod retain_keys_test { ( $s:literal ) => { let field = InitializedField::full($s); let cc = inflector::cases::camelcase::to_camel_case($s); + println!("field: {:?}, cc: {}", field, cc); assert_eq!(field,cc); } ; ( &[ $f:literal $(, $r:literal)* ]) => { @@ -1197,6 +808,7 @@ mod retain_keys_test { .map(|s| inflector::cases::camelcase::to_camel_case(s)) .collect::>() .join("."); + println!("field: {:?}, cc: {}", field, cc); assert_eq!(field,cc); } ; ); diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index c64987b17d35..2e7ea0a07d7d 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -904,9 +904,8 @@ pub mod pallet_prelude { StorageList, }, traits::{ - BuildGenesisConfig, ConstU32, ConstUint, EnsureOrigin, Get, GetDefault, - GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, - Task, TypedGet, + BuildGenesisConfig, ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, + IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, Task, TypedGet, }, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, @@ -1883,16 +1882,11 @@ pub mod pallet_macros { /// } /// ``` /// - /// Please note that this only works for signed dispatchables and requires a transaction + /// Please note that this only works for signed dispatchables and requires a signed /// extension such as [`pallet_skip_feeless_payment::SkipCheckIfFeeless`] to wrap the /// existing payment extension. Else, this is completely ignored and the dispatchable is /// still charged. /// - /// Also this will not allow accountless caller to send a transaction if some transaction - /// extension such as `frame_system::CheckNonce` is used. - /// Extensions such as `frame_system::CheckNonce` require a funded account to validate - /// the transaction. 
- /// /// ### Macro expansion /// /// The macro implements the [`pallet_skip_feeless_payment::CheckIfFeeless`] trait on the diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 728426cc84c7..635036d488df 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -57,13 +57,13 @@ pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter mod misc; pub use misc::{ defensive_prelude::{self, *}, - AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, - ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, DefensiveMax, DefensiveMin, - DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, - EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, - InherentBuilder, IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, - OnNewAccount, PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, - TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, + AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, + ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, + DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, InherentBuilder, + IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, + PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, TypedGet, + UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -110,7 +110,7 @@ pub use dispatch::{ }; mod voting; -pub use voting::{ClassCountOf, NoOpPoll, PollStatus, Polling, VoteTally}; +pub use voting::{ClassCountOf, 
PollStatus, Polling, VoteTally}; mod preimages; pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePreimage}; diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 0dc3abdce956..a914b3a914c1 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -28,8 +28,8 @@ use sp_core::bounded::bounded_vec::TruncateFrom; use core::cmp::Ordering; #[doc(hidden)] pub use sp_runtime::traits::{ - ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, ConstU128, ConstU16, - ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, TypedGet, + ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, + ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, }; use sp_runtime::{traits::Block as BlockT, DispatchError}; diff --git a/substrate/frame/support/src/traits/voting.rs b/substrate/frame/support/src/traits/voting.rs index 697134e4ca47..958ef5dce6c1 100644 --- a/substrate/frame/support/src/traits/voting.rs +++ b/substrate/frame/support/src/traits/voting.rs @@ -19,7 +19,7 @@ //! votes. use crate::dispatch::Parameter; -use alloc::{vec, vec::Vec}; +use alloc::vec::Vec; use codec::{HasCompact, MaxEncodedLen}; use sp_arithmetic::Perbill; use sp_runtime::{traits::Member, DispatchError}; @@ -126,49 +126,3 @@ pub trait Polling { (Self::classes().into_iter().next().expect("Always one class"), u32::max_value()) } } - -/// NoOp polling is required if pallet-referenda functionality not needed. 
-pub struct NoOpPoll; -impl Polling for NoOpPoll { - type Index = u8; - type Votes = u32; - type Class = u16; - type Moment = u64; - - fn classes() -> Vec { - vec![] - } - - fn as_ongoing(_index: Self::Index) -> Option<(Tally, Self::Class)> { - None - } - - fn access_poll( - _index: Self::Index, - f: impl FnOnce(PollStatus<&mut Tally, Self::Moment, Self::Class>) -> R, - ) -> R { - f(PollStatus::None) - } - - fn try_access_poll( - _index: Self::Index, - f: impl FnOnce(PollStatus<&mut Tally, Self::Moment, Self::Class>) -> Result, - ) -> Result { - f(PollStatus::None) - } - - #[cfg(feature = "runtime-benchmarks")] - fn create_ongoing(_class: Self::Class) -> Result { - Err(()) - } - - #[cfg(feature = "runtime-benchmarks")] - fn end_ongoing(_index: Self::Index, _approved: bool) -> Result<(), ()> { - Err(()) - } - - #[cfg(feature = "runtime-benchmarks")] - fn max_ongoing() -> (Self::Class, u32) { - (0, 0) - } -} diff --git a/substrate/frame/support/src/weights/block_weights.rs b/substrate/frame/support/src/weights/block_weights.rs index b4c12aa5d421..38f2ba3f023d 100644 --- a/substrate/frame/support/src/weights/block_weights.rs +++ b/substrate/frame/support/src/weights/block_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08 (Y/M/D) -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! DATE: 2024-04-08 (Y/M/D) +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` @@ -39,21 +39,21 @@ use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { - /// Weight of executing an empty block. + /// Time to execute an empty block. /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 419_969, 685_012 - /// Average: 431_614 - /// Median: 427_388 - /// Std-Dev: 26437.34 + /// Min, Max: 440_235, 661_535 + /// Average: 453_383 + /// Median: 449_925 + /// Std-Dev: 22021.99 /// /// Percentiles nanoseconds: - /// 99th: 456_205 - /// 95th: 443_420 - /// 75th: 431_833 + /// 99th: 474_045 + /// 95th: 466_455 + /// 75th: 455_056 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(431_614), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(453_383), 0); } #[cfg(test)] diff --git a/substrate/frame/support/src/weights/extrinsic_weights.rs b/substrate/frame/support/src/weights/extrinsic_weights.rs index 95d966a412d0..75c7ffa60705 100644 --- a/substrate/frame/support/src/weights/extrinsic_weights.rs +++ b/substrate/frame/support/src/weights/extrinsic_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08 (Y/M/D) -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! DATE: 2024-04-08 (Y/M/D) +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` @@ -39,21 +39,21 @@ use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { - /// Weight of executing a NO-OP extrinsic, for example `System::remark`. + /// Time to execute a NO-OP extrinsic, for example `System::remark`. /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 107_464, 109_127 - /// Average: 108_157 - /// Median: 108_119 - /// Std-Dev: 353.52 + /// Min, Max: 106_559, 107_788 + /// Average: 107_074 + /// Median: 107_067 + /// Std-Dev: 242.67 /// /// Percentiles nanoseconds: - /// 99th: 109_041 - /// 95th: 108_748 - /// 75th: 108_405 + /// 99th: 107_675 + /// 95th: 107_513 + /// 75th: 107_225 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(108_157), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(107_074), 0); } #[cfg(test)] diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index ca122e6bd544..2187ee22b395 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -15,26 +15,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +static_assertions = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } -frame-benchmarking = { workspace = true } -frame-executive = { workspace = true } -frame-metadata = { features = ["current", "unstable"], workspace = true } -frame-support = { features = ["experimental"], workspace = true } -frame-system = { workspace = true } -pretty_assertions = { workspace = true } -rustversion = { workspace = true } scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], workspace = true } +frame-metadata = { features = ["current"], workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } -sp-metadata-ir = { workspace = true } -sp-runtime = { workspace = true } sp-state-machine = { optional = true, workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true } +frame-benchmarking = { workspace = true } +sp-runtime = { 
workspace = true } +sp-core = { workspace = true } sp-version = { workspace = true } -static_assertions = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true } trybuild = { features = ["diff"], workspace = true } +pretty_assertions = { workspace = true } +rustversion = { workspace = true } +frame-system = { workspace = true } +frame-executive = { workspace = true } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message test-pallet = { workspace = true } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 988135d64dbf..9e0a7ff7c675 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-version = { workspace = true } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index dc5558b1d4b8..f03377dc21eb 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } sp-runtime = { workspace = true } [features] diff --git a/substrate/frame/support/test/tests/pallet.rs 
b/substrate/frame/support/test/tests/pallet.rs index 9df1f461bba2..b0b83f772499 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -53,9 +53,6 @@ parameter_types! { /// Latest stable metadata version used for testing. const LATEST_METADATA_VERSION: u32 = 15; -/// Unstable metadata version. -const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; - pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -802,43 +799,20 @@ where } } -#[frame_support::runtime] -mod runtime { - #[runtime::runtime] - #[runtime::derive( - RuntimeCall, - RuntimeEvent, - RuntimeError, - RuntimeOrigin, - RuntimeFreezeReason, - RuntimeHoldReason, - RuntimeSlashReason, - RuntimeLockId, - RuntimeTask - )] - pub struct Runtime; - - #[runtime::pallet_index(0)] - pub type System = frame_system + Call + Event; - - #[runtime::pallet_index(1)] - pub type Example = pallet; - - #[runtime::pallet_index(2)] - #[runtime::disable_call] - pub type Example2 = pallet2; - - #[cfg(feature = "frame-feature-testing")] - #[runtime::pallet_index(3)] - pub type Example3 = pallet3; - - #[runtime::pallet_index(4)] - pub type Example4 = pallet4; +frame_support::construct_runtime!( + pub struct Runtime { + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system exclude_parts { Pallet, Storage }, + Example: pallet, + Example2: pallet2 exclude_parts { Call }, + #[cfg(feature = "frame-feature-testing")] + Example3: pallet3, + Example4: pallet4 use_parts { Call }, - #[cfg(feature = "frame-feature-testing-2")] - #[runtime::pallet_index(5)] - pub type Example5 = pallet5; -} + #[cfg(feature = "frame-feature-testing-2")] + Example5: pallet5, + } +); // Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. 
fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { @@ -1873,16 +1847,6 @@ fn metadata() { error: None, docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, - PalletMetadata { - index: 4, - name: "Example4", - storage: None, - calls: Some(meta_type::>().into()), - event: None, - constants: vec![], - error: None, - docs: vec![], - }, #[cfg(feature = "frame-feature-testing-2")] PalletMetadata { index: 5, @@ -1980,10 +1944,7 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - assert_eq!( - vec![14, LATEST_METADATA_VERSION, UNSTABLE_METADATA_VERSION], - Runtime::metadata_versions() - ); + assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); } #[test] diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index a098643abb91..7523a415d458 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,39 +80,34 @@ sp_api::decl_runtime_apis! { } } -// Module to emulate having the implementation in a different file. -mod apis { - use super::{Block, BlockT, Runtime}; +sp_api::impl_runtime_apis! { + impl self::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - sp_api::impl_runtime_apis! 
{ - impl crate::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn same_name() {} - fn same_name() {} + fn wild_card(_: u32) {} + } - fn wild_card(_: u32) {} + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() - } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() } } } diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 1340b2c55c53..38349c7edbd9 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] cfg-if = { workspace = true } codec = { features = ["derive"], workspace = true } -docify = { workspace = true } -frame-support = { workspace = true } log = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } +frame-support = { workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-std = { workspace = true } sp-version = { features = ["serde"], workspace = true } sp-weights = { features = ["serde"], workspace = true } +docify = { workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } diff --git 
a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index e9aac6e519f3..d9b5e7083bd2 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -sp-externalities = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs index 01e4687bc4bc..3c6626030e22 100644 --- a/substrate/frame/system/benchmarking/src/extensions.rs +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -23,7 +23,6 @@ use alloc::vec; use frame_benchmarking::{account, v2::*, BenchmarkError}; use frame_support::{ dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, - pallet_prelude::Zero, weights::Weight, }; use frame_system::{ @@ -54,14 +53,11 @@ mod benchmarks { let caller = account("caller", 0, 0); let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); - frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::BlockHash::::hashed_key_for(BlockNumberFor::::zero()).into(), - ); #[block] { CheckGenesis::::new() - .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| 
Ok(().into())) .unwrap() .unwrap(); } @@ -85,13 +81,10 @@ mod benchmarks { ..Default::default() }; let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); - frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::BlockHash::::hashed_key_for(prev_block).into(), - ); #[block] { - ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) .unwrap() .unwrap(); } @@ -116,13 +109,10 @@ mod benchmarks { ..Default::default() }; let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); - frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::BlockHash::::hashed_key_for(BlockNumberFor::::zero()).into(), - ); #[block] { - ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) .unwrap() .unwrap(); } @@ -139,7 +129,7 @@ mod benchmarks { #[block] { - ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) .unwrap() .unwrap(); } @@ -161,7 +151,7 @@ mod benchmarks { #[block] { - ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, 0, |_| { + ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, |_| { Ok(().into()) }) .unwrap() @@ -183,7 +173,7 @@ mod benchmarks { #[block] { CheckSpecVersion::::new() - .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) .unwrap() .unwrap(); } @@ -200,7 +190,7 @@ mod benchmarks { #[block] { CheckTxVersion::::new() - .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| 
Ok(().into())) .unwrap() .unwrap(); } @@ -240,7 +230,7 @@ mod benchmarks { #[block] { - ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info)) + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) .unwrap() .unwrap(); } diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 3fd1985619bd..8e968a536756 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -docify = { workspace = true } sp-api = { workspace = true } +docify = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index e2c22a07a3fe..7da5521f353d 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -17,7 +17,6 @@ use crate::{pallet_prelude::BlockNumberFor, BlockHash, Config, Pallet}; use codec::{Decode, Encode}; -use frame_support::pallet_prelude::TransactionSource; use scale_info::TypeInfo; use sp_runtime::{ generic::Era, @@ -92,7 +91,6 @@ impl TransactionExtension for CheckMort _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { let current_u64 = >::block_number().saturated_into::(); let valid_till = self.0.death(current_u64); @@ -117,9 +115,7 @@ mod tests { weights::Weight, }; use sp_core::H256; - use sp_runtime::{ - traits::DispatchTransaction, transaction_validity::TransactionSource::External, - }; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_era_should_work() { @@ -155,10 +151,7 @@ mod tests { >::insert(16, H256::repeat_byte(1)); assert_eq!( - ext.validate_only(Some(1).into(), CALL, 
&normal, len, External, 0) - .unwrap() - .0 - .longevity, + ext.validate_only(Some(1).into(), CALL, &normal, len).unwrap().0.longevity, 15 ); }) diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 978eebaf3dac..ec8c12b790d2 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -18,7 +18,7 @@ use crate::Config; use codec::{Decode, Encode}; use core::marker::PhantomData; -use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait, DefaultNoBound}; +use frame_support::{traits::OriginTrait, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ impl_tx_ext_default, @@ -68,7 +68,6 @@ impl TransactionExtension for CheckNonZ _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> sp_runtime::traits::ValidateResult { if let Some(who) = origin.as_signer() { if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { @@ -86,8 +85,8 @@ mod tests { use crate::mock::{new_test_ext, Test, CALL}; use frame_support::{assert_ok, dispatch::DispatchInfo}; use sp_runtime::{ - traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication}, - transaction_validity::{TransactionSource::External, TransactionValidityError}, + traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}, + transaction_validity::TransactionValidityError, }; #[test] @@ -97,7 +96,7 @@ mod tests { let len = 0_usize; assert_eq!( CheckNonZeroSender::::new() - .validate_only(Some(0).into(), CALL, &info, len, External, 0) + .validate_only(Some(0).into(), CALL, &info, len) .unwrap_err(), TransactionValidityError::from(InvalidTransaction::BadSigner) ); @@ -105,9 +104,7 @@ mod tests { Some(1).into(), CALL, &info, - len, - External, - 0, + len )); }) } @@ -118,7 +115,7 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; let 
(_, _, origin) = CheckNonZeroSender::::new() - .validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External) + .validate(None.into(), CALL, &info, len, (), CALL) .unwrap(); assert!(!origin.is_transaction_authorized()); }) diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index bc19a09e06a9..d96d2c2c0662 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -18,9 +18,7 @@ use crate::Config; use alloc::vec; use codec::{Decode, Encode}; -use frame_support::{ - dispatch::DispatchInfo, pallet_prelude::TransactionSource, RuntimeDebugNoBound, -}; +use frame_support::{dispatch::DispatchInfo, RuntimeDebugNoBound}; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -110,7 +108,6 @@ where _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { let Some(who) = origin.as_system_origin_signer() else { return Ok((Default::default(), Val::Refund(self.weight(call)), origin)) @@ -185,10 +182,7 @@ mod tests { use frame_support::{ assert_ok, assert_storage_noop, dispatch::GetDispatchInfo, traits::OriginTrait, }; - use sp_runtime::{ - traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication}, - transaction_validity::TransactionSource::External, - }; + use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}; #[test] fn signed_ext_check_nonce_works() { @@ -209,13 +203,13 @@ mod tests { assert_storage_noop!({ assert_eq!( CheckNonce::(0u64.into()) - .validate_only(Some(1).into(), CALL, &info, len, External, 0) + .validate_only(Some(1).into(), CALL, &info, len) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Stale) ); assert_eq!( CheckNonce::(0u64.into()) - .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) 
.unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Stale) ); @@ -225,29 +219,24 @@ mod tests { Some(1).into(), CALL, &info, - len, - External, - 0, + len )); assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( Some(1).into(), CALL, &info, - len, - 0, + len )); // future assert_ok!(CheckNonce::(5u64.into()).validate_only( Some(1).into(), CALL, &info, - len, - External, - 0, + len )); assert_eq!( CheckNonce::(5u64.into()) - .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Future) ); @@ -283,13 +272,13 @@ mod tests { assert_storage_noop!({ assert_eq!( CheckNonce::(1u64.into()) - .validate_only(Some(1).into(), CALL, &info, len, External, 0) + .validate_only(Some(1).into(), CALL, &info, len) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment) ); assert_eq!( CheckNonce::(1u64.into()) - .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment) ); @@ -299,32 +288,26 @@ mod tests { Some(2).into(), CALL, &info, - len, - External, - 0, + len )); assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( Some(2).into(), CALL, &info, - len, - 0, + len )); // Non-zero sufficients assert_ok!(CheckNonce::(1u64.into()).validate_only( Some(3).into(), CALL, &info, - len, - External, - 0, + len )); assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( Some(3).into(), CALL, &info, - len, - 0, + len )); }) } @@ -335,7 +318,7 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; let (_, val, origin) = CheckNonce::(1u64.into()) - .validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External) + .validate(None.into(), CALL, &info, len, (), CALL) .unwrap(); assert!(!origin.is_transaction_authorized()); 
assert_ok!(CheckNonce::(1u64.into()).prepare(val, &origin, CALL, &info, len)); @@ -359,7 +342,7 @@ mod tests { let len = 0_usize; // run the validation step let (_, val, origin) = CheckNonce::(1u64.into()) - .validate(Some(1).into(), CALL, &info, len, (), &TxBaseImplication(CALL), External) + .validate(Some(1).into(), CALL, &info, len, (), CALL) .unwrap(); // mutate `AccountData` for the caller crate::Account::::mutate(1, |info| { @@ -393,7 +376,7 @@ mod tests { let len = CALL.encoded_size(); let origin = crate::RawOrigin::Root.into(); - let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); assert!(origin.as_system_ref().unwrap().is_root()); diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index ee91478b90f3..131057f54a78 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -19,7 +19,6 @@ use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, - pallet_prelude::TransactionSource, traits::Get, }; use scale_info::TypeInfo; @@ -255,7 +254,6 @@ where len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { let (validity, next_len) = Self::do_validate(info, len)?; Ok((validity, next_len, origin)) @@ -548,7 +546,7 @@ mod tests { // will not fit. assert_eq!( CheckWeight::(PhantomData) - .validate_and_prepare(Some(1).into(), CALL, &normal, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) .unwrap_err(), InvalidTransaction::ExhaustsResources.into() ); @@ -557,8 +555,7 @@ mod tests { Some(1).into(), CALL, &op, - len, - 0, + len )); // likewise for length limit. 
@@ -566,7 +563,7 @@ mod tests { AllExtrinsicsLen::::put(normal_length_limit()); assert_eq!( CheckWeight::(PhantomData) - .validate_and_prepare(Some(1).into(), CALL, &normal, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) .unwrap_err(), InvalidTransaction::ExhaustsResources.into() ); @@ -574,8 +571,7 @@ mod tests { Some(1).into(), CALL, &op, - len, - 0, + len )); }) } @@ -592,7 +588,6 @@ mod tests { CALL, tx, s, - 0, ); if f { assert!(r.is_err()) @@ -643,7 +638,6 @@ mod tests { CALL, i, len, - 0, ); if f { assert!(r.is_err()) @@ -679,7 +673,7 @@ mod tests { }); let pre = CheckWeight::(PhantomData) - .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) .unwrap() .0; assert_eq!( @@ -718,7 +712,7 @@ mod tests { }); let pre = CheckWeight::(PhantomData) - .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) .unwrap() .0; assert_eq!( @@ -757,8 +751,7 @@ mod tests { Some(1).into(), CALL, &free, - len, - 0, + len )); assert_eq!( System::block_weight().total(), diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs index b3c296899be5..1c0136ae7802 100644 --- a/substrate/frame/system/src/extensions/weights.rs +++ b/substrate/frame/system/src/extensions/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `frame_system_extensions` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -68,38 +68,38 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_genesis() -> Weight { // Proof Size summary in bytes: - // Measured: `30` + // Measured: `54` // Estimated: `3509` - // Minimum execution time: 3_388_000 picoseconds. - Weight::from_parts(3_577_000, 3509) + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_mortality_mortal_transaction() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_442_000 picoseconds. - Weight::from_parts(6_703_000, 3509) + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_mortality_immortal_transaction() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_357_000 picoseconds. - Weight::from_parts(6_605_000, 3509) + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn check_non_zero_sender() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 457_000 picoseconds. - Weight::from_parts(570_000, 0) + // Minimum execution time: 449_000 picoseconds. 
+ Weight::from_parts(527_000, 0) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -107,8 +107,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 6_936_000 picoseconds. - Weight::from_parts(7_261_000, 3593) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_000_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,22 +116,26 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 336_000 picoseconds. - Weight::from_parts(430_000, 0) + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) } fn check_tx_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 348_000 picoseconds. - Weight::from_parts(455_000, 0) + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn check_weight() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_887_000 picoseconds. - Weight::from_parts(3_006_000, 0) + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. 
+ Weight::from_parts(4_747_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -141,38 +145,38 @@ impl WeightInfo for () { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_genesis() -> Weight { // Proof Size summary in bytes: - // Measured: `30` + // Measured: `54` // Estimated: `3509` - // Minimum execution time: 3_388_000 picoseconds. - Weight::from_parts(3_577_000, 3509) + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_mortality_mortal_transaction() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_442_000 picoseconds. - Weight::from_parts(6_703_000, 3509) + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn check_mortality_immortal_transaction() -> Weight { // Proof Size summary in bytes: - // Measured: `68` + // Measured: `92` // Estimated: `3509` - // Minimum execution time: 6_357_000 picoseconds. - Weight::from_parts(6_605_000, 3509) + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn check_non_zero_sender() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 457_000 picoseconds. - Weight::from_parts(570_000, 0) + // Minimum execution time: 449_000 picoseconds. 
+ Weight::from_parts(527_000, 0) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -180,8 +184,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 6_936_000 picoseconds. - Weight::from_parts(7_261_000, 3593) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_000_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -189,21 +193,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 336_000 picoseconds. - Weight::from_parts(430_000, 0) + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) } fn check_tx_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 348_000 picoseconds. - Weight::from_parts(455_000, 0) + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn check_weight() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_887_000 picoseconds. - Weight::from_parts(3_006_000, 0) + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. 
+ Weight::from_parts(4_747_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 862fb4cf9faf..04bedd78cc12 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -311,7 +311,7 @@ pub mod pallet { type Hash = sp_core::hash::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; - type Lookup = sp_runtime::traits::IdentityLookup; + type Lookup = sp_runtime::traits::IdentityLookup; type MaxConsumers = frame_support::traits::ConstU32<16>; type AccountData = (); type OnNewAccount = (); @@ -973,7 +973,6 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] - #[pallet::whitelist_storage] #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index 8450e0e7fb94..fca14e452657 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,8 +70,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_093_000 picoseconds. - Weight::from_parts(2_169_000, 0) + // Minimum execution time: 2_078_000 picoseconds. + Weight::from_parts(1_137_744, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } @@ -80,33 +80,38 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_750_000 picoseconds. - Weight::from_parts(23_611_490, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_613, 0).saturating_mul(b.into())) + // Minimum execution time: 5_980_000 picoseconds. + Weight::from_parts(2_562_415, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_391, 0).saturating_mul(b.into())) } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_465_000 picoseconds. - Weight::from_parts(3_616_000, 0) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Estimated: `1485` + // Minimum execution time: 3_834_000 picoseconds. 
+ Weight::from_parts(4_109_000, 1485) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 90_830_152_000 picoseconds. - Weight::from_parts(96_270_304_000, 67035) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 81_326_496_000 picoseconds. + Weight::from_parts(81_880_651_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -115,10 +120,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_147_000 picoseconds. - Weight::from_parts(2_239_000, 0) - // Standard Error: 2_137 - .saturating_add(Weight::from_parts(748_304, 0).saturating_mul(i.into())) + // Minimum execution time: 2_059_000 picoseconds. + Weight::from_parts(2_192_000, 0) + // Standard Error: 720 + .saturating_add(Weight::from_parts(742_610, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -128,10 +133,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_053_000 picoseconds. 
- Weight::from_parts(2_188_000, 0) - // Standard Error: 878 - .saturating_add(Weight::from_parts(560_728, 0).saturating_mul(i.into())) + // Minimum execution time: 2_038_000 picoseconds. + Weight::from_parts(2_159_000, 0) + // Standard Error: 774 + .saturating_add(Weight::from_parts(569_424, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -139,12 +144,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `120 + p * (69 ±0)` + // Measured: `127 + p * (69 ±0)` // Estimated: `134 + p * (70 ±0)` - // Minimum execution time: 4_244_000 picoseconds. - Weight::from_parts(4_397_000, 134) - // Standard Error: 1_410 - .saturating_add(Weight::from_parts(1_307_089, 0).saturating_mul(p.into())) + // Minimum execution time: 3_990_000 picoseconds. + Weight::from_parts(4_172_000, 134) + // Standard Error: 1_485 + .saturating_add(Weight::from_parts(1_227_281, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -155,24 +160,26 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_037_000 picoseconds. - Weight::from_parts(16_335_000, 0) + // Minimum execution time: 8_851_000 picoseconds. 
+ Weight::from_parts(9_643_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: // Measured: `164` // Estimated: `67035` - // Minimum execution time: 95_970_737_000 picoseconds. - Weight::from_parts(98_826_505_000, 67035) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 86_295_879_000 picoseconds. + Weight::from_parts(87_636_595_000, 67035) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } } @@ -183,8 +190,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_093_000 picoseconds. - Weight::from_parts(2_169_000, 0) + // Minimum execution time: 2_078_000 picoseconds. + Weight::from_parts(1_137_744, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } @@ -193,33 +200,38 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_750_000 picoseconds. - Weight::from_parts(23_611_490, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_613, 0).saturating_mul(b.into())) + // Minimum execution time: 5_980_000 picoseconds. 
+ Weight::from_parts(2_562_415, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_391, 0).saturating_mul(b.into())) } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_465_000 picoseconds. - Weight::from_parts(3_616_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Estimated: `1485` + // Minimum execution time: 3_834_000 picoseconds. + Weight::from_parts(4_109_000, 1485) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 90_830_152_000 picoseconds. - Weight::from_parts(96_270_304_000, 67035) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 81_326_496_000 picoseconds. 
+ Weight::from_parts(81_880_651_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -228,10 +240,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_147_000 picoseconds. - Weight::from_parts(2_239_000, 0) - // Standard Error: 2_137 - .saturating_add(Weight::from_parts(748_304, 0).saturating_mul(i.into())) + // Minimum execution time: 2_059_000 picoseconds. + Weight::from_parts(2_192_000, 0) + // Standard Error: 720 + .saturating_add(Weight::from_parts(742_610, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -241,10 +253,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_053_000 picoseconds. - Weight::from_parts(2_188_000, 0) - // Standard Error: 878 - .saturating_add(Weight::from_parts(560_728, 0).saturating_mul(i.into())) + // Minimum execution time: 2_038_000 picoseconds. + Weight::from_parts(2_159_000, 0) + // Standard Error: 774 + .saturating_add(Weight::from_parts(569_424, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -252,12 +264,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `120 + p * (69 ±0)` + // Measured: `127 + p * (69 ±0)` // Estimated: `134 + p * (70 ±0)` - // Minimum execution time: 4_244_000 picoseconds. 
- Weight::from_parts(4_397_000, 134) - // Standard Error: 1_410 - .saturating_add(Weight::from_parts(1_307_089, 0).saturating_mul(p.into())) + // Minimum execution time: 3_990_000 picoseconds. + Weight::from_parts(4_172_000, 134) + // Standard Error: 1_485 + .saturating_add(Weight::from_parts(1_227_281, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -268,23 +280,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_037_000 picoseconds. - Weight::from_parts(16_335_000, 0) + // Minimum execution time: 8_851_000 picoseconds. + Weight::from_parts(9_643_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: // Measured: `164` // Estimated: `67035` - // Minimum execution time: 95_970_737_000 picoseconds. - Weight::from_parts(98_826_505_000, 67035) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 86_295_879_000 picoseconds. 
+ Weight::from_parts(87_636_595_000, 67035) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 75788aef348a..0eff0530c7e2 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-inherents = { workspace = true } sp-io = { optional = true, workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 5cb6c859c417..78e2939e65b9 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -161,7 +161,7 @@ pub mod pallet { impl DefaultConfig for TestDefaultConfig { type Moment = u64; type OnTimestampSet = (); - type MinimumPeriod = ConstUint<1>; + type MinimumPeriod = frame_support::traits::ConstU64<1>; type WeightInfo = (); } } diff --git a/substrate/frame/timestamp/src/weights.rs b/substrate/frame/timestamp/src/weights.rs index 9f16a82653a9..9f2cbf7ccd12 100644 --- a/substrate/frame/timestamp/src/weights.rs +++ b/substrate/frame/timestamp/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `345` // Estimated: `1493` - // Minimum execution time: 10_176_000 picoseconds. - Weight::from_parts(10_560_000, 1493) + // Minimum execution time: 8_356_000 picoseconds. + Weight::from_parts(8_684_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_915_000 picoseconds. - Weight::from_parts(5_192_000, 0) + // Minimum execution time: 3_886_000 picoseconds. + Weight::from_parts(4_118_000, 0) } } @@ -90,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `345` // Estimated: `1493` - // Minimum execution time: 10_176_000 picoseconds. - Weight::from_parts(10_560_000, 1493) + // Minimum execution time: 8_356_000 picoseconds. + Weight::from_parts(8_684_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -99,7 +99,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_915_000 picoseconds. - Weight::from_parts(5_192_000, 0) + // Minimum execution time: 3_886_000 picoseconds. 
+ Weight::from_parts(4_118_000, 0) } } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 6b5b89e7a197..7c7a2d6aa909 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-treasury = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/tips/src/weights.rs b/substrate/frame/tips/src/weights.rs index e9805e9cc9bf..7e1bba3c73e7 100644 --- a/substrate/frame/tips/src/weights.rs +++ b/substrate/frame/tips/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_tips` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -71,10 +71,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 26_606_000 picoseconds. - Weight::from_parts(27_619_942, 3469) - // Standard Error: 179 - .saturating_add(Weight::from_parts(2_750, 0).saturating_mul(r.into())) + // Minimum execution time: 26_549_000 picoseconds. + Weight::from_parts(27_804_619, 3469) + // Standard Error: 173 + .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -86,8 +86,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 29_286_000 picoseconds. - Weight::from_parts(30_230_000, 3686) + // Minimum execution time: 25_430_000 picoseconds. + Weight::from_parts(26_056_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,14 +101,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `623 + t * (64 ±0)` - // Estimated: `4088 + t * (64 ±0)` - // Minimum execution time: 21_690_000 picoseconds. - Weight::from_parts(22_347_457, 4088) - // Standard Error: 125 - .saturating_add(Weight::from_parts(2_332, 0).saturating_mul(r.into())) - // Standard Error: 2_974 - .saturating_add(Weight::from_parts(20_772, 0).saturating_mul(t.into())) + // Measured: `526 + t * (64 ±0)` + // Estimated: `3991 + t * (64 ±0)` + // Minimum execution time: 17_309_000 picoseconds. 
+ Weight::from_parts(17_493_185, 3991) + // Standard Error: 126 + .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(r.into())) + // Standard Error: 3_011 + .saturating_add(Weight::from_parts(88_592, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -120,12 +120,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `844 + t * (112 ±0)` - // Estimated: `4309 + t * (112 ±0)` - // Minimum execution time: 20_588_000 picoseconds. - Weight::from_parts(21_241_034, 4309) - // Standard Error: 2_448 - .saturating_add(Weight::from_parts(133_643, 0).saturating_mul(t.into())) + // Measured: `747 + t * (112 ±0)` + // Estimated: `4212 + t * (112 ±0)` + // Minimum execution time: 14_148_000 picoseconds. + Weight::from_parts(14_434_268, 4212) + // Standard Error: 4_666 + .saturating_add(Weight::from_parts(210_867, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -141,27 +141,29 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `896 + t * (112 ±0)` - // Estimated: `4353 + t * (111 ±0)` - // Minimum execution time: 60_824_000 picoseconds. - Weight::from_parts(63_233_742, 4353) - // Standard Error: 9_841 - .saturating_add(Weight::from_parts(77_920, 0).saturating_mul(t.into())) + // Measured: `786 + t * (112 ±0)` + // Estimated: `4242 + t * (112 ±0)` + // Minimum execution time: 56_060_000 picoseconds. 
+ Weight::from_parts(57_913_972, 4242) + // Standard Error: 11_691 + .saturating_add(Weight::from_parts(229_579, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 111).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: `Tips::Tips` (r:1 w:1) /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Tips::Reasons` (r:0 w:1) /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. - fn slash_tip(_t: u32, ) -> Weight { + fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 13_281_000 picoseconds. - Weight::from_parts(14_089_409, 3734) + // Minimum execution time: 12_034_000 picoseconds. + Weight::from_parts(12_934_534, 3734) + // Standard Error: 2_420 + .saturating_add(Weight::from_parts(4_167, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -178,10 +180,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 26_606_000 picoseconds. - Weight::from_parts(27_619_942, 3469) - // Standard Error: 179 - .saturating_add(Weight::from_parts(2_750, 0).saturating_mul(r.into())) + // Minimum execution time: 26_549_000 picoseconds. + Weight::from_parts(27_804_619, 3469) + // Standard Error: 173 + .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -193,8 +195,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 29_286_000 picoseconds. 
- Weight::from_parts(30_230_000, 3686) + // Minimum execution time: 25_430_000 picoseconds. + Weight::from_parts(26_056_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -208,14 +210,14 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `623 + t * (64 ±0)` - // Estimated: `4088 + t * (64 ±0)` - // Minimum execution time: 21_690_000 picoseconds. - Weight::from_parts(22_347_457, 4088) - // Standard Error: 125 - .saturating_add(Weight::from_parts(2_332, 0).saturating_mul(r.into())) - // Standard Error: 2_974 - .saturating_add(Weight::from_parts(20_772, 0).saturating_mul(t.into())) + // Measured: `526 + t * (64 ±0)` + // Estimated: `3991 + t * (64 ±0)` + // Minimum execution time: 17_309_000 picoseconds. + Weight::from_parts(17_493_185, 3991) + // Standard Error: 126 + .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(r.into())) + // Standard Error: 3_011 + .saturating_add(Weight::from_parts(88_592, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -227,12 +229,12 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `844 + t * (112 ±0)` - // Estimated: `4309 + t * (112 ±0)` - // Minimum execution time: 20_588_000 picoseconds. - Weight::from_parts(21_241_034, 4309) - // Standard Error: 2_448 - .saturating_add(Weight::from_parts(133_643, 0).saturating_mul(t.into())) + // Measured: `747 + t * (112 ±0)` + // Estimated: `4212 + t * (112 ±0)` + // Minimum execution time: 14_148_000 picoseconds. 
+ Weight::from_parts(14_434_268, 4212) + // Standard Error: 4_666 + .saturating_add(Weight::from_parts(210_867, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -248,27 +250,29 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `896 + t * (112 ±0)` - // Estimated: `4353 + t * (111 ±0)` - // Minimum execution time: 60_824_000 picoseconds. - Weight::from_parts(63_233_742, 4353) - // Standard Error: 9_841 - .saturating_add(Weight::from_parts(77_920, 0).saturating_mul(t.into())) + // Measured: `786 + t * (112 ±0)` + // Estimated: `4242 + t * (112 ±0)` + // Minimum execution time: 56_060_000 picoseconds. + Weight::from_parts(57_913_972, 4242) + // Standard Error: 11_691 + .saturating_add(Weight::from_parts(229_579, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 111).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } /// Storage: `Tips::Tips` (r:1 w:1) /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Tips::Reasons` (r:0 w:1) /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. - fn slash_tip(_t: u32, ) -> Weight { + fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 13_281_000 picoseconds. - Weight::from_parts(14_089_409, 3734) + // Minimum execution time: 12_034_000 picoseconds. 
+ Weight::from_parts(12_934_534, 3734) + // Standard Error: 2_420 + .saturating_add(Weight::from_parts(4_167, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 2639bda18b6c..afa03ceb12eb 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -19,18 +19,18 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 147859fdb26a..7c98d157f6ff 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -17,21 +17,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -codec = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system 
= { workspace = true } pallet-asset-conversion = { workspace = true } pallet-transaction-payment = { workspace = true } +codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-runtime = { workspace = true } [dev-dependencies] -pallet-assets = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-storage = { workspace = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs index eb2635694e9c..97eff03d849d 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs @@ -59,7 +59,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) .unwrap() .is_ok()); } @@ -86,7 +86,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) .unwrap() .is_ok()); } @@ -115,7 +115,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, 0, |_| Ok( + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( post_info )) .unwrap() diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index 
d6721c46422b..787f6b122e86 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -47,7 +47,6 @@ extern crate alloc; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, - pallet_prelude::TransactionSource, traits::IsType, DefaultNoBound, }; @@ -309,7 +308,6 @@ where len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { let Some(who) = origin.as_system_origin_signer() else { return Ok((ValidTransaction::default(), Val::NoCharge, origin)) diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 6ce4652fd42f..4312aa9a452f 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -168,7 +168,7 @@ fn transaction_payment_in_native_possible() { let mut info = info_from_weight(WEIGHT_5); let ext = ChargeAssetTxPayment::::from(0, None); info.extension_weight = ext.weight(CALL); - let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); @@ -185,7 +185,7 @@ fn transaction_payment_in_native_possible() { let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); let extension_weight = ext.weight(CALL); info.extension_weight = extension_weight; - let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; 
assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); @@ -255,13 +255,7 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance); let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_weight(WEIGHT_5), - len, - 0, - ) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -319,13 +313,7 @@ fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { let len = 10; let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_weight(WEIGHT_5), - len, - 0, - ); + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len); // As there is no pool in the dex set up for this asset, conversion should fail. 
assert!(pre.is_err()); @@ -376,13 +364,7 @@ fn transaction_payment_without_fee() { let fee_in_asset = input_quote.unwrap(); let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_weight(WEIGHT_5), - len, - 0, - ) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used @@ -463,8 +445,7 @@ fn asset_transaction_payment_with_tip_and_refund() { let mut info = info_from_weight(WEIGHT_100); let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); info.extension_weight = ext.weight(CALL); - let (pre, _) = - ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let final_weight = 50; @@ -558,13 +539,7 @@ fn payment_from_account_with_only_assets() { assert_eq!(fee_in_asset, 201); let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_weight(WEIGHT_5), - len, - 0, - ) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -620,13 +595,7 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // there will be no conversion when the fee is zero { let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_pays(Pays::No), - len, - 0, - ) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies there are no fees assert_eq!(Assets::balance(asset_id, caller), balance); @@ -657,7 +626,6 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { 
CALL, &info_from_weight(Weight::from_parts(weight, 0)), len, - 0, ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -708,7 +676,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { assert!(fee > 0); let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len, 0) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees @@ -762,8 +730,7 @@ fn fee_with_native_asset_passed_with_id() { let mut info = info_from_weight(WEIGHT_100); info.extension_weight = extension_weight; - let (pre, _) = - ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Balances::free_balance(caller), caller_balance - initial_fee); let final_weight = 50; @@ -842,7 +809,7 @@ fn transfer_add_and_remove_account() { let mut info = info_from_weight(WEIGHT_100); info.extension_weight = extension_weight; let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .validate_and_prepare(Some(caller).into(), CALL, &info, len, 0) + .validate_and_prepare(Some(caller).into(), CALL, &info, len) .unwrap(); assert_eq!(Assets::balance(asset_id, &caller), balance - fee_in_asset); @@ -902,7 +869,7 @@ fn no_fee_and_no_weight_for_other_origins() { let len = CALL.encoded_size(); let origin = frame_system::RawOrigin::Root.into(); - let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); assert!(origin.as_system_ref().unwrap().is_root()); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs index 587a399634b7..f95e49f80730 100644 --- 
a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_asset_conversion_tx_payment` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -63,33 +63,42 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 735_000 picoseconds. - Weight::from_parts(805_000, 0) + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_asset_tx_payment_native() -> Weight { // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 45_111_000 picoseconds. 
- Weight::from_parts(45_685_000, 3593) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_263_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_asset_tx_payment_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `711` + // Measured: `888` // Estimated: `6208` - // Minimum execution time: 164_069_000 picoseconds. - Weight::from_parts(166_667_000, 6208) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 112_432_000 picoseconds. + Weight::from_parts(113_992_000, 6208) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } } @@ -100,33 +109,42 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 735_000 picoseconds. 
- Weight::from_parts(805_000, 0) + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_asset_tx_payment_native() -> Weight { // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 45_111_000 picoseconds. - Weight::from_parts(45_685_000, 3593) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. 
+ Weight::from_parts(35_263_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_asset_tx_payment_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `711` + // Measured: `888` // Estimated: `6208` - // Minimum execution time: 164_069_000 picoseconds. - Weight::from_parts(166_667_000, 6208) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 112_432_000 picoseconds. 
+ Weight::from_parts(113_992_000, 6208) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 2924860c5201..89fe5bfe7a42 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -21,10 +21,10 @@ sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-transaction-payment = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } # Other dependencies codec = { features = ["derive"], workspace = true } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs index e4340cc6a152..25902bf452b2 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs @@ -59,7 +59,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) .unwrap() .is_ok()); } @@ -87,7 +87,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) .unwrap() .is_ok()); } @@ -119,7 +119,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, 0, |_| Ok( + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( post_info )) 
.unwrap() diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index dd752989c366..25aa272ba01b 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -38,7 +38,7 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, - pallet_prelude::{TransactionSource, Weight}, + pallet_prelude::Weight, traits::{ tokens::{ fungibles::{Balanced, Credit, Inspect}, @@ -324,7 +324,6 @@ where len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> Result< (ValidTransaction, Self::Val, ::RuntimeOrigin), TransactionValidityError, diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 6de2e8e7da55..cd694c3e81a7 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -122,7 +122,7 @@ fn transaction_payment_in_native_possible() { let mut info = info_from_weight(Weight::from_parts(5, 0)); let ext = ChargeAssetTxPayment::::from(0, None); info.extension_weight = ext.weight(CALL); - let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); @@ -138,7 +138,7 @@ fn transaction_payment_in_native_possible() { let mut info = info_from_weight(Weight::from_parts(100, 0)); let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); info.extension_weight = ext.weight(CALL); - let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = 
ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); @@ -204,7 +204,6 @@ fn transaction_payment_in_asset_possible() { CALL, &info_from_weight(Weight::from_parts(weight, 0)), len, - 0, ) .unwrap(); // assert that native balance is not used @@ -275,7 +274,6 @@ fn transaction_payment_without_fee() { CALL, &info_from_weight(Weight::from_parts(weight, 0)), len, - 0, ) .unwrap(); // assert that native balance is not used @@ -336,8 +334,7 @@ fn asset_transaction_payment_with_tip_and_refund() { min_balance / ExistentialDeposit::get(); let mut info = info_from_weight(Weight::from_parts(weight, 0)); info.extension_weight = ext_weight; - let (pre, _) = - ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { @@ -412,7 +409,6 @@ fn payment_from_account_with_only_assets() { CALL, &info_from_weight(Weight::from_parts(weight, 0)), len, - 0, ) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); @@ -449,8 +445,7 @@ fn payment_only_with_existing_sufficient_asset() { Some(caller).into(), CALL, &info_from_weight(Weight::from_parts(weight, 0)), - len, - 0, + len ) .is_err()); @@ -469,8 +464,7 @@ fn payment_only_with_existing_sufficient_asset() { Some(caller).into(), CALL, &info_from_weight(Weight::from_parts(weight, 0)), - len, - 0, + len ) .is_err()); }); @@ -510,13 +504,7 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { assert_eq!(fee, 0); { let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .validate_and_prepare( - Some(caller).into(), - CALL, - &info_from_pays(Pays::No), - len, - 0, - ) + .validate_and_prepare(Some(caller).into(), CALL, 
&info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` still implies no fees assert_eq!(Assets::balance(asset_id, caller), balance); @@ -536,7 +524,6 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { CALL, &info_from_weight(Weight::from_parts(weight, 0)), len, - 0, ) .unwrap(); // check that at least one coin was charged in the given asset @@ -586,7 +573,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // calculated fee is greater than 0 assert!(fee > 0); let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len, 0) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); @@ -626,7 +613,7 @@ fn no_fee_and_no_weight_for_other_origins() { let len = CALL.encoded_size(); let origin = frame_system::RawOrigin::Root.into(); - let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); assert!(origin.as_system_ref().unwrap().is_root()); diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 5ba1d1297679..d6ac648cefd4 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -39,15 +39,13 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{CheckIfFeeless, DispatchResult}, - pallet_prelude::TransactionSource, traits::{IsType, OriginTrait}, weights::Weight, }; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ traits::{ - DispatchInfoOf, DispatchOriginOf, Implication, PostDispatchInfoOf, TransactionExtension, - ValidateResult, + DispatchInfoOf, DispatchOriginOf, PostDispatchInfoOf, TransactionExtension, 
ValidateResult, }, transaction_validity::TransactionValidityError, }; @@ -148,21 +146,13 @@ where info: &DispatchInfoOf, len: usize, self_implicit: S::Implicit, - inherited_implication: &impl Implication, - source: TransactionSource, + inherited_implication: &impl Encode, ) -> ValidateResult { if call.is_feeless(&origin) { Ok((Default::default(), Skip(origin.caller().clone()), origin)) } else { - let (x, y, z) = self.0.validate( - origin, - call, - info, - len, - self_implicit, - inherited_implication, - source, - )?; + let (x, y, z) = + self.0.validate(origin, call, info, len, self_implicit, inherited_implication)?; Ok((x, Apply(y), z)) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index cff232a0cae3..83f7b7dfe2b5 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -60,7 +60,6 @@ impl TransactionExtension for DummyExtension { _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { ValidateCount::mutate(|c| *c += 1); Ok((ValidTransaction::default(), (), origin)) diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs index b6ecbf9d5764..666844c883bd 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -18,19 +18,19 @@ use crate::mock::{ pallet_dummy::Call, DummyExtension, PrepareCount, Runtime, RuntimeCall, ValidateCount, }; use frame_support::dispatch::DispatchInfo; -use sp_runtime::{traits::DispatchTransaction, transaction_validity::TransactionSource}; +use sp_runtime::traits::DispatchTransaction; #[test] fn skip_feeless_payment_works() { let call = 
RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PrepareCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PrepareCount::get(), 1); } @@ -41,28 +41,14 @@ fn validate_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_only( - Some(0).into(), - &call, - &DispatchInfo::default(), - 0, - TransactionSource::External, - 0, - ) + .validate_only(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); assert_eq!(PrepareCount::get(), 0); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_only( - Some(0).into(), - &call, - &DispatchInfo::default(), - 0, - TransactionSource::External, - 0, - ) + .validate_only(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); assert_eq!(PrepareCount::get(), 0); @@ -74,14 +60,14 @@ fn validate_prepare_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); assert_eq!(PrepareCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + 
.validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); assert_eq!(PrepareCount::get(), 1); @@ -89,7 +75,7 @@ fn validate_prepare_works() { // Changes from previous prepare calls persist. let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 2); assert_eq!(PrepareCount::get(), 2); diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs index eba4c0964ce7..c5f87fb8c12c 100644 --- a/substrate/frame/transaction-payment/src/benchmarking.rs +++ b/substrate/frame/transaction-payment/src/benchmarking.rs @@ -68,7 +68,7 @@ mod benchmarks { #[block] { assert!(ext - .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, 0, |_| Ok( + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, |_| Ok( post_info )) .unwrap() diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 216697beac69..711189be8d07 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -54,7 +54,6 @@ use frame_support::{ dispatch::{ DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, }, - pallet_prelude::TransactionSource, traits::{Defensive, EstimateCallFee, Get}, weights::{Weight, WeightToFee}, RuntimeDebugNoBound, @@ -403,7 +402,6 @@ pub mod pallet { } #[pallet::storage] - #[pallet::whitelist_storage] pub type NextFeeMultiplier = StorageValue<_, Multiplier, ValueQuery, NextFeeMultiplierOnEmpty>; @@ -918,7 +916,6 @@ where len: usize, _: (), _implication: &impl Encode, - _source: TransactionSource, ) -> Result< (ValidTransaction, Self::Val, 
::RuntimeOrigin), TransactionValidityError, diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index b8a047fee3e6..4b39cd3fe53b 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -155,15 +155,14 @@ where if let Some(paid) = already_withdrawn { // Calculate how much refund we should return let refund_amount = paid.peek().saturating_sub(corrected_fee); - // Refund to the the account that paid the fees if it exists & refund is non-zero. - // Otherwise, don't refund anything. - let refund_imbalance = - if refund_amount > Zero::zero() && F::total_balance(who) > F::Balance::zero() { - F::deposit(who, refund_amount, Precision::BestEffort) - .unwrap_or_else(|_| Debt::::zero()) - } else { - Debt::::zero() - }; + // Refund to the account that paid the fees if it exists. Otherwise, don't refund + // anything. + let refund_imbalance = if F::total_balance(who) > F::Balance::zero() { + F::deposit(who, refund_amount, Precision::BestEffort) + .unwrap_or_else(|_| Debt::::zero()) + } else { + Debt::::zero() + }; // merge the imbalance caused by paying the fees and refunding parts of it again. 
let adjusted_paid: Credit = paid .offset(refund_imbalance) diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index bde1bf64728e..e8f5ab99529f 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -23,7 +23,7 @@ use codec::Encode; use sp_runtime::{ generic::UncheckedExtrinsic, traits::{DispatchTransaction, One}, - transaction_validity::{InvalidTransaction, TransactionSource::External}, + transaction_validity::InvalidTransaction, BuildStorage, }; @@ -144,7 +144,7 @@ fn transaction_extension_transaction_payment_work() { let ext = Ext::from(0); let ext_weight = ext.weight(CALL); info.extension_weight = ext_weight; - ext.test_run(Some(1).into(), CALL, &info, 10, 0, |_| { + ext.test_run(Some(1).into(), CALL, &info, 10, |_| { assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); Ok(default_post_info()) }) @@ -159,7 +159,7 @@ fn transaction_extension_transaction_payment_work() { let mut info = info_from_weight(Weight::from_parts(100, 0)); info.extension_weight = ext_weight; Ext::from(5 /* tipped */) - .test_run(Some(2).into(), CALL, &info, 10, 0, |_| { + .test_run(Some(2).into(), CALL, &info, 10, |_| { assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 10 - 5); Ok(post_info_from_weight(Weight::from_parts(50, 0))) }) @@ -186,7 +186,7 @@ fn transaction_extension_transaction_payment_multiplied_refund_works() { let ext = Ext::from(5 /* tipped */); let ext_weight = ext.weight(CALL); info.extension_weight = ext_weight; - ext.test_run(origin, CALL, &info, len, 0, |_| { + ext.test_run(origin, CALL, &info, len, |_| { // 5 base fee, 10 byte fee, 3/2 * (100 call weight fee + 10 ext weight fee), 5 // tip assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 165 - 5); @@ -206,7 +206,7 @@ fn transaction_extension_transaction_payment_is_bounded() { ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight 
possible let info = info_from_weight(Weight::MAX); - assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10, 0)); + assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10)); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -235,7 +235,7 @@ fn transaction_extension_allows_free_transactions() { class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len, External, 0)); + assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len)); // like a InsecureFreeNormal let free_tx = DispatchInfo { @@ -245,9 +245,7 @@ fn transaction_extension_allows_free_transactions() { pays_fee: Pays::Yes, }; assert_eq!( - Ext::from(0) - .validate_only(Some(1).into(), CALL, &free_tx, len, External, 0) - .unwrap_err(), + Ext::from(0).validate_only(Some(1).into(), CALL, &free_tx, len).unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); @@ -264,7 +262,7 @@ fn transaction_ext_length_fee_is_also_updated_per_congestion() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; let info = info_from_weight(Weight::from_parts(3, 0)); - assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, &info, len, 0)); + assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, &info, len)); assert_eq!( Balances::free_balance(1), 100 // original @@ -526,7 +524,7 @@ fn refund_does_not_recreate_account() { System::set_block_number(10); let info = info_from_weight(Weight::from_parts(100, 0)); Ext::from(5 /* tipped */) - .test_run(Some(2).into(), CALL, &info, 10, 0, |origin| { + .test_run(Some(2).into(), CALL, &info, 10, |origin| { assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); // kill the account between pre and post dispatch @@ -564,7 +562,7 @@ fn actual_weight_higher_than_max_refunds_nothing() { .execute_with(|| { 
let info = info_from_weight(Weight::from_parts(100, 0)); Ext::from(5 /* tipped */) - .test_run(Some(2).into(), CALL, &info, 10, 0, |_| { + .test_run(Some(2).into(), CALL, &info, 10, |_| { assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); Ok(post_info_from_weight(Weight::from_parts(101, 0))) }) @@ -591,7 +589,7 @@ fn zero_transfer_on_free_transaction() { }; let user = 69; Ext::from(0) - .test_run(Some(user).into(), CALL, &info, 10, 0, |_| { + .test_run(Some(user).into(), CALL, &info, 10, |_| { assert_eq!(Balances::total_balance(&user), 0); Ok(default_post_info()) }) @@ -628,7 +626,7 @@ fn refund_consistent_with_actual_weight() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); let actual_post_info = ext - .test_run(Some(2).into(), CALL, &info, len, 0, |_| Ok(post_info)) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) .unwrap() .unwrap(); post_info @@ -661,19 +659,11 @@ fn should_alter_operational_priority() { }; let ext = Ext::from(tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &normal, len, External, 0) - .unwrap() - .0 - .priority; + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 60); let ext = Ext::from(2 * tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &normal, len, External, 0) - .unwrap() - .0 - .priority; + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 110); }); @@ -686,19 +676,11 @@ fn should_alter_operational_priority() { }; let ext = Ext::from(tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &op, len, External, 0) - .unwrap() - .0 - .priority; + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5810); let ext = Ext::from(2 * tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &op, len, External, 0) - .unwrap() - .0 - .priority; + let priority = 
ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 6110); }); } @@ -716,11 +698,7 @@ fn no_tip_has_some_priority() { pays_fee: Pays::Yes, }; let ext = Ext::from(tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &normal, len, External, 0) - .unwrap() - .0 - .priority; + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 10); }); @@ -732,11 +710,7 @@ fn no_tip_has_some_priority() { pays_fee: Pays::Yes, }; let ext = Ext::from(tip); - let priority = ext - .validate_only(Some(2).into(), CALL, &op, len, External, 0) - .unwrap() - .0 - .priority; + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5510); }); } @@ -755,12 +729,7 @@ fn higher_tip_have_higher_priority() { pays_fee: Pays::Yes, }; let ext = Ext::from(tip); - - pri1 = ext - .validate_only(Some(2).into(), CALL, &normal, len, External, 0) - .unwrap() - .0 - .priority; + pri1 = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { @@ -771,11 +740,7 @@ fn higher_tip_have_higher_priority() { pays_fee: Pays::Yes, }; let ext = Ext::from(tip); - pri2 = ext - .validate_only(Some(2).into(), CALL, &op, len, External, 0) - .unwrap() - .0 - .priority; + pri2 = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; }); (pri1, pri2) @@ -807,7 +772,7 @@ fn post_info_can_change_pays_fee() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); let post_info = ChargeTransactionPayment::::from(tip) - .test_run(Some(2).into(), CALL, &info, len, 0, |_| Ok(post_info)) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) .unwrap() .unwrap(); @@ -855,7 +820,7 @@ fn no_fee_and_no_weight_for_other_origins() { let len = CALL.encoded_size(); let origin = frame_system::RawOrigin::Root.into(); - let (pre, origin) = 
ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); assert!(origin.as_system_ref().unwrap().is_root()); @@ -877,40 +842,3 @@ fn no_fee_and_no_weight_for_other_origins() { assert_eq!(post_info.actual_weight, Some(info.call_weight)); }) } - -#[test] -fn fungible_adapter_no_zero_refund_action() { - type FungibleAdapterT = payment::FungibleAdapter; - - ExtBuilder::default().balance_factor(10).build().execute_with(|| { - System::set_block_number(10); - - let dummy_acc = 1; - let (actual_fee, no_tip) = (10, 0); - let already_paid = >::withdraw_fee( - &dummy_acc, - CALL, - &CALL.get_dispatch_info(), - actual_fee, - no_tip, - ).expect("Account must have enough funds."); - - // Correction action with no expected side effect. - assert!(>::correct_and_deposit_fee( - &dummy_acc, - &CALL.get_dispatch_info(), - &default_post_info(), - actual_fee, - no_tip, - already_paid, - ).is_ok()); - - // Ensure no zero amount deposit event is emitted. - let events = System::events(); - assert!(!events - .iter() - .any(|record| matches!(record.event, RuntimeEvent::Balances(pallet_balances::Event::Deposit { amount, .. }) if amount.is_zero())), - "No zero amount deposit amount event should be emitted.", - ); - }); -} diff --git a/substrate/frame/transaction-payment/src/weights.rs b/substrate/frame/transaction-payment/src/weights.rs index 59d5cac7a2b7..bcffb2eb331a 100644 --- a/substrate/frame/transaction-payment/src/weights.rs +++ b/substrate/frame/transaction-payment/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_transaction_payment` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -57,30 +57,36 @@ pub trait WeightInfo { /// Weights for `pallet_transaction_payment` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_transaction_payment() -> Weight { // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 39_528_000 picoseconds. - Weight::from_parts(40_073_000, 3593) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. + Weight::from_parts(41_647_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) } } // For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn charge_transaction_payment() -> Weight { // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 39_528_000 picoseconds. - Weight::from_parts(40_073_000, 3593) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. 
+ Weight::from_parts(41_647_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } } diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index 0ca38e9dd60d..f5d6bd1c364c 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { optional = true, workspace = true, default-features = true } codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } pallet-balances = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } sp-inherents = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-transaction-storage-proof = { workspace = true } +log = { workspace = true } [dev-dependencies] sp-core = { workspace = true } diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index 0b5b0dc99405..f360e9847a1e 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -19,14 +19,16 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::*; +use super::*; use alloc::{vec, vec::Vec}; -use frame_benchmarking::v2::*; +use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; use frame_support::traits::{Get, OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, One, Zero}; use sp_transaction_storage_proof::TransactionStorageProof; +use 
crate::Pallet as TransactionStorage; + // Proof generated from max size storage: // ``` // let mut transactions = Vec::new(); @@ -120,50 +122,39 @@ pub fn run_to_block(n: frame_system::pallet_prelude::BlockNumberFor) { +benchmarks! { + store { + let l in 1 .. T::MaxTransactionSize::get(); let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]); - + }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) + verify { assert!(!BlockTransactions::::get().is_empty()); assert_last_event::(Event::Stored { index: 0 }.into()); } - #[benchmark] - fn renew() -> Result<(), BenchmarkError> { + renew { let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - Pallet::::store( + TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], )?; run_to_block::(1u32.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), BlockNumberFor::::zero(), 0); - + }: _(RawOrigin::Signed(caller.clone()), BlockNumberFor::::zero(), 0) + verify { assert_last_event::(Event::Renewed { index: 0 }.into()); - - Ok(()) } - #[benchmark] - fn check_proof_max() -> Result<(), BenchmarkError> { + check_proof_max { run_to_block::(1u32.into()); let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - for _ in 0..T::MaxBlockTransactions::get() { - Pallet::::store( + for _ in 0 .. 
T::MaxBlockTransactions::get() { + TransactionStorage::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], )?; @@ -171,14 +162,10 @@ mod benchmarks { run_to_block::(StoragePeriod::::get() + BlockNumberFor::::one()); let encoded_proof = proof(); let proof = TransactionStorageProof::decode(&mut &*encoded_proof).unwrap(); - - #[extrinsic_call] - check_proof(RawOrigin::None, proof); - + }: check_proof(RawOrigin::None, proof) + verify { assert_last_event::(Event::ProofChecked.into()); - - Ok(()) } - impl_benchmark_test_suite!(Pallet, mock::new_test_ext(), mock::Test); + impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/transaction-storage/src/weights.rs b/substrate/frame/transaction-storage/src/weights.rs index 36681f0abd8b..4d51daa17b40 100644 --- a/substrate/frame/transaction-storage/src/weights.rs +++ b/substrate/frame/transaction-storage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_transaction_storage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -64,7 +64,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. @@ -72,10 +72,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `242` // Estimated: `38351` - // Minimum execution time: 65_899_000 picoseconds. - Weight::from_parts(66_814_000, 38351) - // Standard Error: 7 - .saturating_add(Weight::from_parts(7_678, 0).saturating_mul(l.into())) + // Minimum execution time: 62_024_000 picoseconds. 
+ Weight::from_parts(63_536_000, 38351) + // Standard Error: 13 + .saturating_add(Weight::from_parts(7_178, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -86,15 +86,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `40351` - // Minimum execution time: 87_876_000 picoseconds. - Weight::from_parts(91_976_000, 40351) + // Minimum execution time: 81_473_000 picoseconds. + Weight::from_parts(84_000_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -112,8 +112,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 78_423_000 picoseconds. - Weight::from_parts(82_423_000, 40351) + // Minimum execution time: 68_167_000 picoseconds. 
+ Weight::from_parts(75_532_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,7 +126,7 @@ impl WeightInfo for () { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. @@ -134,10 +134,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `242` // Estimated: `38351` - // Minimum execution time: 65_899_000 picoseconds. - Weight::from_parts(66_814_000, 38351) - // Standard Error: 7 - .saturating_add(Weight::from_parts(7_678, 0).saturating_mul(l.into())) + // Minimum execution time: 62_024_000 picoseconds. 
+ Weight::from_parts(63_536_000, 38351) + // Standard Error: 13 + .saturating_add(Weight::from_parts(7_178, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -148,15 +148,15 @@ impl WeightInfo for () { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `40351` - // Minimum execution time: 87_876_000 picoseconds. - Weight::from_parts(91_976_000, 40351) + // Minimum execution time: 81_473_000 picoseconds. + Weight::from_parts(84_000_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -174,8 +174,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 78_423_000 picoseconds. - Weight::from_parts(82_423_000, 40351) + // Minimum execution time: 68_167_000 picoseconds. 
+ Weight::from_parts(75_532_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index c6f059f5fa03..93a3d9bea93d 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -21,21 +21,21 @@ codec = { features = [ "max-encoded-len", ], workspace = true } docify = { workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } pallet-balances = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -sp-core = { optional = true, workspace = true } sp-runtime = { workspace = true } +sp-core = { optional = true, workspace = true } +log = { workspace = true } [dev-dependencies] +sp-io = { workspace = true, default-features = true } pallet-utility = { workspace = true, default-features = true } sp-core = { workspace = true } -sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index a11723a27b2c..a03ee149db9b 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -198,7 +198,7 @@ mod benchmarks { None, ); - let valid_from = T::BlockNumberProvider::current_block_number(); + let valid_from = frame_system::Pallet::::block_number(); let expire_at = valid_from.saturating_add(T::PayoutPeriod::get()); assert_last_event::( 
Event::AssetSpendApproved { diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 281012ffb4c9..faacda1c0783 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -106,7 +106,7 @@ use frame_support::{ weights::Weight, BoundedVec, PalletId, }; -use frame_system::pallet_prelude::BlockNumberFor as SystemBlockNumberFor; +use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; pub use weights::WeightInfo; @@ -122,8 +122,6 @@ pub type NegativeImbalanceOf = <>::Currency as Currenc >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BeneficiaryLookupOf = <>::BeneficiaryLookup as StaticLookup>::Source; -pub type BlockNumberFor = - <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -204,7 +202,7 @@ pub mod pallet { pallet_prelude::*, traits::tokens::{ConversionFromAssetBalance, PaymentStatus}, }; - use frame_system::pallet_prelude::{ensure_signed, OriginFor}; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -223,7 +221,7 @@ pub mod pallet { /// Period between successive spends. #[pallet::constant] - type SpendPeriod: Get>; + type SpendPeriod: Get>; /// Percentage of spare funds (if any) that are burnt per spend period. #[pallet::constant] @@ -279,14 +277,14 @@ pub mod pallet { /// The period during which an approved treasury spend has to be claimed. #[pallet::constant] - type PayoutPeriod: Get>; + type PayoutPeriod: Get>; /// Helper type for benchmarks. #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper: ArgumentsFactory; /// Provider for the block number. Normally this is the `frame_system` pallet. 
- type BlockNumberProvider: BlockNumberProvider; + type BlockNumberProvider: BlockNumberProvider>; } /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. @@ -337,7 +335,7 @@ pub mod pallet { T::AssetKind, AssetBalanceOf, T::Beneficiary, - BlockNumberFor, + BlockNumberFor, ::Id, >, OptionQuery, @@ -345,7 +343,7 @@ pub mod pallet { /// The blocknumber for the last triggered spend period. #[pallet::storage] - pub(crate) type LastSpendPeriod = StorageValue<_, BlockNumberFor, OptionQuery>; + pub(crate) type LastSpendPeriod = StorageValue<_, BlockNumberFor, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -393,8 +391,8 @@ pub mod pallet { asset_kind: T::AssetKind, amount: AssetBalanceOf, beneficiary: T::Beneficiary, - valid_from: BlockNumberFor, - expire_at: BlockNumberFor, + valid_from: BlockNumberFor, + expire_at: BlockNumberFor, }, /// An approved spend was voided. AssetSpendVoided { index: SpendIndex }, @@ -436,10 +434,10 @@ pub mod pallet { } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { /// ## Complexity /// - `O(A)` where `A` is the number of approvals - fn on_initialize(_do_not_use_local_block_number: SystemBlockNumberFor) -> Weight { + fn on_initialize(_do_not_use_local_block_number: BlockNumberFor) -> Weight { let block_number = T::BlockNumberProvider::current_block_number(); let pot = Self::pot(); let deactivated = Deactivated::::get(); @@ -460,7 +458,7 @@ pub mod pallet { // empty. .unwrap_or_else(|| Self::update_last_spend_period()); let blocks_since_last_spend_period = block_number.saturating_sub(last_spend_period); - let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); // Safe because of `max(1)` above. 
let (spend_periods_passed, extra_blocks) = ( @@ -468,7 +466,7 @@ pub mod pallet { blocks_since_last_spend_period % safe_spend_period, ); let new_last_spend_period = block_number.saturating_sub(extra_blocks); - if spend_periods_passed > BlockNumberFor::::zero() { + if spend_periods_passed > BlockNumberFor::::zero() { Self::spend_funds(spend_periods_passed, new_last_spend_period) } else { Weight::zero() @@ -476,7 +474,7 @@ pub mod pallet { } #[cfg(feature = "try-runtime")] - fn try_state(_: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state()?; Ok(()) } @@ -640,7 +638,7 @@ pub mod pallet { asset_kind: Box, #[pallet::compact] amount: AssetBalanceOf, beneficiary: Box>, - valid_from: Option>, + valid_from: Option>, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::BeneficiaryLookup::lookup(*beneficiary)?; @@ -846,9 +844,9 @@ impl, I: 'static> Pallet { // Backfill the `LastSpendPeriod` storage, assuming that no configuration has changed // since introducing this code. Used specifically for a migration-less switch to populate // `LastSpendPeriod`. - fn update_last_spend_period() -> BlockNumberFor { + fn update_last_spend_period() -> BlockNumberFor { let block_number = T::BlockNumberProvider::current_block_number(); - let spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + let spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); let time_since_last_spend = block_number % spend_period; // If it happens that this logic runs directly on a spend period block, we need to backdate // to the last spend period so a spend still occurs this block. @@ -891,8 +889,8 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. 
pub fn spend_funds( - spend_periods_passed: BlockNumberFor, - new_last_spend_period: BlockNumberFor, + spend_periods_passed: BlockNumberFor, + new_last_spend_period: BlockNumberFor, ) -> Weight { LastSpendPeriod::::put(new_last_spend_period); let mut total_weight = Weight::zero(); diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index f5063eb881c4..8c9c6eb1d0fb 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_treasury` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -73,55 +73,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 11_807_000 picoseconds. - Weight::from_parts(12_313_000, 1887) + // Minimum execution time: 11_910_000 picoseconds. 
+ Weight::from_parts(12_681_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 7_217_000 picoseconds. - Weight::from_parts(7_516_000, 1887) + // Minimum execution time: 6_372_000 picoseconds. + Weight::from_parts(6_567_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Deactivated` (r:1 w:1) /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Treasury::LastSpendPeriod` (r:1 w:1) - /// Proof: `Treasury::LastSpendPeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. 
fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `170` - // Estimated: `1501` - // Minimum execution time: 10_929_000 picoseconds. - Weight::from_parts(13_737_454, 1501) - // Standard Error: 790 - .saturating_add(Weight::from_parts(33_673, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `451 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 33_150_000 picoseconds. + Weight::from_parts(41_451_020, 1887) + // Standard Error: 19_018 + .saturating_add(Weight::from_parts(34_410_759, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Treasury::SpendCount` (r:1 w:1) /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Treasury::Spends` (r:0 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `141` - // Estimated: `3502` - // Minimum execution time: 16_082_000 picoseconds. 
- Weight::from_parts(16_542_000, 3502) + // Measured: `140` + // Estimated: `3501` + // Minimum execution time: 14_233_000 picoseconds. + Weight::from_parts(14_842_000, 3501) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) @@ -130,32 +139,32 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `710` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 64_180_000 picoseconds. - Weight::from_parts(65_783_000, 6208) + // Minimum execution time: 58_857_000 picoseconds. + Weight::from_parts(61_291_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `199` - // Estimated: `3539` - // Minimum execution time: 13_379_000 picoseconds. - Weight::from_parts(13_751_000, 3539) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 12_116_000 picoseconds. 
+ Weight::from_parts(12_480_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `199` - // Estimated: `3539` - // Minimum execution time: 12_014_000 picoseconds. - Weight::from_parts(12_423_000, 3539) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_834_000 picoseconds. + Weight::from_parts(11_427_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -173,55 +182,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 11_807_000 picoseconds. - Weight::from_parts(12_313_000, 1887) + // Minimum execution time: 11_910_000 picoseconds. + Weight::from_parts(12_681_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 7_217_000 picoseconds. - Weight::from_parts(7_516_000, 1887) + // Minimum execution time: 6_372_000 picoseconds. 
+ Weight::from_parts(6_567_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Deactivated` (r:1 w:1) /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Treasury::LastSpendPeriod` (r:1 w:1) - /// Proof: `Treasury::LastSpendPeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `170` - // Estimated: `1501` - // Minimum execution time: 10_929_000 picoseconds. - Weight::from_parts(13_737_454, 1501) - // Standard Error: 790 - .saturating_add(Weight::from_parts(33_673, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `451 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 33_150_000 picoseconds. 
+ Weight::from_parts(41_451_020, 1887) + // Standard Error: 19_018 + .saturating_add(Weight::from_parts(34_410_759, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Treasury::SpendCount` (r:1 w:1) /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Treasury::Spends` (r:0 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `141` - // Estimated: `3502` - // Minimum execution time: 16_082_000 picoseconds. - Weight::from_parts(16_542_000, 3502) + // Measured: `140` + // Estimated: `3501` + // Minimum execution time: 14_233_000 picoseconds. 
+ Weight::from_parts(14_842_000, 3501) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) @@ -230,32 +248,32 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `710` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 64_180_000 picoseconds. - Weight::from_parts(65_783_000, 6208) + // Minimum execution time: 58_857_000 picoseconds. + Weight::from_parts(61_291_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `199` - // Estimated: `3539` - // Minimum execution time: 13_379_000 picoseconds. - Weight::from_parts(13_751_000, 3539) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 12_116_000 picoseconds. 
+ Weight::from_parts(12_480_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `199` - // Estimated: `3539` - // Minimum execution time: 12_014_000 picoseconds. - Weight::from_parts(12_423_000, 3539) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_834_000 picoseconds. + Weight::from_parts(11_427_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 6298645fb2b3..03c700ec053c 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -20,18 +20,18 @@ docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-balances = { optional = true, workspace = true } -pallet-proxy = { optional = true, workspace = true } -pallet-utility = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +pallet-balances 
= { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index fd9b3b552ccd..84ce45e83528 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -105,7 +105,6 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; - type BlockNumberProvider = frame_system::Pallet; } parameter_types! { diff --git a/substrate/frame/tx-pause/src/weights.rs b/substrate/frame/tx-pause/src/weights.rs index 67e1390e9c7d..e7837e9ca89c 100644 --- a/substrate/frame/tx-pause/src/weights.rs +++ b/substrate/frame/tx-pause/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_tx_pause` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 12_474_000 picoseconds. - Weight::from_parts(12_922_000, 3997) + // Minimum execution time: 12_218_000 picoseconds. 
+ Weight::from_parts(12_542_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 19_918_000 picoseconds. - Weight::from_parts(20_380_000, 3997) + // Minimum execution time: 18_314_000 picoseconds. + Weight::from_parts(18_990_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -90,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 12_474_000 picoseconds. - Weight::from_parts(12_922_000, 3997) + // Minimum execution time: 12_218_000 picoseconds. + Weight::from_parts(12_542_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -101,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 19_918_000 picoseconds. - Weight::from_parts(20_380_000, 3997) + // Minimum execution time: 18_314_000 picoseconds. 
+ Weight::from_parts(18_990_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 135292fb4ecd..abd456d97556 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/uniques/src/weights.rs b/substrate/frame/uniques/src/weights.rs index 60c6f9316ec7..5576c8921f9c 100644 --- a/substrate/frame/uniques/src/weights.rs +++ b/substrate/frame/uniques/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_uniques` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -88,10 +88,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `282` + // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_956_000 picoseconds. - Weight::from_parts(33_104_000, 3643) + // Minimum execution time: 27_074_000 picoseconds. + Weight::from_parts(28_213_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,10 +101,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `109` // Estimated: `3643` - // Minimum execution time: 12_757_000 picoseconds. - Weight::from_parts(13_327_000, 3643) + // Minimum execution time: 12_034_000 picoseconds. + Weight::from_parts(12_669_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -129,16 +129,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `451 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 3_236_461_000 picoseconds. 
- Weight::from_parts(3_291_013_000, 3643) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(8_285_170, 0).saturating_mul(n.into())) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(469_210, 0).saturating_mul(m.into())) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(546_865, 0).saturating_mul(a.into())) + // Minimum execution time: 2_928_174_000 picoseconds. + Weight::from_parts(2_970_367_000, 3643) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(7_336_699, 0).saturating_mul(n.into())) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(401_816, 0).saturating_mul(m.into())) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(346_952, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -161,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 39_056_000 picoseconds. - Weight::from_parts(40_157_000, 3643) + // Minimum execution time: 33_733_000 picoseconds. + Weight::from_parts(35_366_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -178,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 39_462_000 picoseconds. - Weight::from_parts(41_368_000, 3643) + // Minimum execution time: 35_064_000 picoseconds. 
+ Weight::from_parts(35_747_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -195,10 +195,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 30_639_000 picoseconds. - Weight::from_parts(31_523_000, 3643) + // Minimum execution time: 24_955_000 picoseconds. + Weight::from_parts(25_661_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -209,12 +209,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `838 + i * (76 ±0)` + // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 16_920_000 picoseconds. - Weight::from_parts(17_096_000, 3643) - // Standard Error: 24_966 - .saturating_add(Weight::from_parts(18_491_945, 0).saturating_mul(i.into())) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_490_000, 3643) + // Standard Error: 14_697 + .saturating_add(Weight::from_parts(15_720_495, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -227,10 +227,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 21_752_000 picoseconds. 
- Weight::from_parts(22_743_000, 3643) + // Minimum execution time: 16_183_000 picoseconds. + Weight::from_parts(16_716_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -240,10 +240,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 21_892_000 picoseconds. - Weight::from_parts(22_583_000, 3643) + // Minimum execution time: 16_119_000 picoseconds. + Weight::from_parts(16_725_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,10 +251,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_920_000 picoseconds. - Weight::from_parts(16_470_000, 3643) + // Minimum execution time: 10_889_000 picoseconds. + Weight::from_parts(11_480_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,10 +262,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_489_000 picoseconds. - Weight::from_parts(16_232_000, 3643) + // Minimum execution time: 10_903_000 picoseconds. 
+ Weight::from_parts(11_241_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,10 +279,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `630` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 31_035_000 picoseconds. - Weight::from_parts(31_987_000, 3643) + // Minimum execution time: 24_942_000 picoseconds. + Weight::from_parts(25_715_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -290,10 +290,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_914_000 picoseconds. - Weight::from_parts(16_494_000, 3643) + // Minimum execution time: 11_488_000 picoseconds. + Weight::from_parts(11_752_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -303,10 +303,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 19_490_000 picoseconds. - Weight::from_parts(20_121_000, 3643) + // Minimum execution time: 14_721_000 picoseconds. 
+ Weight::from_parts(15_187_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -318,10 +318,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `626` // Estimated: `3652` - // Minimum execution time: 42_331_000 picoseconds. - Weight::from_parts(44_248_000, 3652) + // Minimum execution time: 36_665_000 picoseconds. + Weight::from_parts(37_587_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -333,10 +333,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `856` + // Measured: `823` // Estimated: `3652` - // Minimum execution time: 42_378_000 picoseconds. - Weight::from_parts(43_407_000, 3652) + // Minimum execution time: 35_066_000 picoseconds. + Weight::from_parts(36_380_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -346,10 +346,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `448` + // Measured: `415` // Estimated: `3652` - // Minimum execution time: 32_461_000 picoseconds. - Weight::from_parts(33_579_000, 3652) + // Minimum execution time: 27_060_000 picoseconds. 
+ Weight::from_parts(27_813_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -359,10 +359,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `626` // Estimated: `3652` - // Minimum execution time: 34_123_000 picoseconds. - Weight::from_parts(35_283_000, 3652) + // Minimum execution time: 27_776_000 picoseconds. + Weight::from_parts(28_582_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -372,10 +372,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 33_300_000 picoseconds. - Weight::from_parts(34_163_000, 3643) + // Minimum execution time: 27_636_000 picoseconds. + Weight::from_parts(29_118_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -385,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `573` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 32_810_000 picoseconds. - Weight::from_parts(33_865_000, 3643) + // Minimum execution time: 28_246_000 picoseconds. 
+ Weight::from_parts(29_059_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -398,10 +398,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 22_203_000 picoseconds. - Weight::from_parts(22_831_000, 3643) + // Minimum execution time: 16_793_000 picoseconds. + Weight::from_parts(17_396_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -411,10 +411,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `561` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 22_182_000 picoseconds. - Weight::from_parts(22_739_000, 3643) + // Minimum execution time: 16_726_000 picoseconds. + Weight::from_parts(17_357_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -422,10 +422,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `109` // Estimated: `3517` - // Minimum execution time: 13_384_000 picoseconds. - Weight::from_parts(13_850_000, 3517) + // Minimum execution time: 12_686_000 picoseconds. 
+ Weight::from_parts(13_182_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -435,10 +435,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 18_516_000 picoseconds. - Weight::from_parts(19_043_000, 3643) + // Minimum execution time: 13_508_000 picoseconds. + Weight::from_parts(13_906_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -448,10 +448,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `326` // Estimated: `3587` - // Minimum execution time: 18_536_000 picoseconds. - Weight::from_parts(19_118_000, 3587) + // Minimum execution time: 13_742_000 picoseconds. + Weight::from_parts(14_200_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -465,10 +465,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `640` + // Measured: `607` // Estimated: `3643` - // Minimum execution time: 38_751_000 picoseconds. - Weight::from_parts(39_570_000, 3643) + // Minimum execution time: 32_931_000 picoseconds. 
+ Weight::from_parts(34_023_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -482,10 +482,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `282` + // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_956_000 picoseconds. - Weight::from_parts(33_104_000, 3643) + // Minimum execution time: 27_074_000 picoseconds. + Weight::from_parts(28_213_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -495,10 +495,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `109` // Estimated: `3643` - // Minimum execution time: 12_757_000 picoseconds. - Weight::from_parts(13_327_000, 3643) + // Minimum execution time: 12_034_000 picoseconds. + Weight::from_parts(12_669_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -523,16 +523,16 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `451 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 3_236_461_000 picoseconds. 
- Weight::from_parts(3_291_013_000, 3643) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(8_285_170, 0).saturating_mul(n.into())) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(469_210, 0).saturating_mul(m.into())) - // Standard Error: 39_603 - .saturating_add(Weight::from_parts(546_865, 0).saturating_mul(a.into())) + // Minimum execution time: 2_928_174_000 picoseconds. + Weight::from_parts(2_970_367_000, 3643) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(7_336_699, 0).saturating_mul(n.into())) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(401_816, 0).saturating_mul(m.into())) + // Standard Error: 30_368 + .saturating_add(Weight::from_parts(346_952, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -555,10 +555,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 39_056_000 picoseconds. - Weight::from_parts(40_157_000, 3643) + // Minimum execution time: 33_733_000 picoseconds. + Weight::from_parts(35_366_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -572,10 +572,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 39_462_000 picoseconds. - Weight::from_parts(41_368_000, 3643) + // Minimum execution time: 35_064_000 picoseconds. 
+ Weight::from_parts(35_747_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,10 +589,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 30_639_000 picoseconds. - Weight::from_parts(31_523_000, 3643) + // Minimum execution time: 24_955_000 picoseconds. + Weight::from_parts(25_661_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -603,12 +603,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `838 + i * (76 ±0)` + // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 16_920_000 picoseconds. - Weight::from_parts(17_096_000, 3643) - // Standard Error: 24_966 - .saturating_add(Weight::from_parts(18_491_945, 0).saturating_mul(i.into())) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_490_000, 3643) + // Standard Error: 14_697 + .saturating_add(Weight::from_parts(15_720_495, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -621,10 +621,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 21_752_000 picoseconds. - Weight::from_parts(22_743_000, 3643) + // Minimum execution time: 16_183_000 picoseconds. 
+ Weight::from_parts(16_716_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -634,10 +634,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 21_892_000 picoseconds. - Weight::from_parts(22_583_000, 3643) + // Minimum execution time: 16_119_000 picoseconds. + Weight::from_parts(16_725_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -645,10 +645,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_920_000 picoseconds. - Weight::from_parts(16_470_000, 3643) + // Minimum execution time: 10_889_000 picoseconds. + Weight::from_parts(11_480_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -656,10 +656,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_489_000 picoseconds. - Weight::from_parts(16_232_000, 3643) + // Minimum execution time: 10_903_000 picoseconds. 
+ Weight::from_parts(11_241_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -673,10 +673,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `630` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 31_035_000 picoseconds. - Weight::from_parts(31_987_000, 3643) + // Minimum execution time: 24_942_000 picoseconds. + Weight::from_parts(25_715_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -684,10 +684,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_914_000 picoseconds. - Weight::from_parts(16_494_000, 3643) + // Minimum execution time: 11_488_000 picoseconds. + Weight::from_parts(11_752_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -697,10 +697,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 19_490_000 picoseconds. - Weight::from_parts(20_121_000, 3643) + // Minimum execution time: 14_721_000 picoseconds. 
+ Weight::from_parts(15_187_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -712,10 +712,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `626` // Estimated: `3652` - // Minimum execution time: 42_331_000 picoseconds. - Weight::from_parts(44_248_000, 3652) + // Minimum execution time: 36_665_000 picoseconds. + Weight::from_parts(37_587_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -727,10 +727,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `856` + // Measured: `823` // Estimated: `3652` - // Minimum execution time: 42_378_000 picoseconds. - Weight::from_parts(43_407_000, 3652) + // Minimum execution time: 35_066_000 picoseconds. + Weight::from_parts(36_380_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -740,10 +740,10 @@ impl WeightInfo for () { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `448` + // Measured: `415` // Estimated: `3652` - // Minimum execution time: 32_461_000 picoseconds. - Weight::from_parts(33_579_000, 3652) + // Minimum execution time: 27_060_000 picoseconds. 
+ Weight::from_parts(27_813_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -753,10 +753,10 @@ impl WeightInfo for () { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `626` // Estimated: `3652` - // Minimum execution time: 34_123_000 picoseconds. - Weight::from_parts(35_283_000, 3652) + // Minimum execution time: 27_776_000 picoseconds. + Weight::from_parts(28_582_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -766,10 +766,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 33_300_000 picoseconds. - Weight::from_parts(34_163_000, 3643) + // Minimum execution time: 27_636_000 picoseconds. + Weight::from_parts(29_118_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -779,10 +779,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `573` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 32_810_000 picoseconds. - Weight::from_parts(33_865_000, 3643) + // Minimum execution time: 28_246_000 picoseconds. 
+ Weight::from_parts(29_059_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -792,10 +792,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `495` // Estimated: `3643` - // Minimum execution time: 22_203_000 picoseconds. - Weight::from_parts(22_831_000, 3643) + // Minimum execution time: 16_793_000 picoseconds. + Weight::from_parts(17_396_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -805,10 +805,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `561` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 22_182_000 picoseconds. - Weight::from_parts(22_739_000, 3643) + // Minimum execution time: 16_726_000 picoseconds. + Weight::from_parts(17_357_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -816,10 +816,10 @@ impl WeightInfo for () { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `109` // Estimated: `3517` - // Minimum execution time: 13_384_000 picoseconds. - Weight::from_parts(13_850_000, 3517) + // Minimum execution time: 12_686_000 picoseconds. 
+ Weight::from_parts(13_182_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -829,10 +829,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `382` + // Measured: `349` // Estimated: `3643` - // Minimum execution time: 18_516_000 picoseconds. - Weight::from_parts(19_043_000, 3643) + // Minimum execution time: 13_508_000 picoseconds. + Weight::from_parts(13_906_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -842,10 +842,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `326` // Estimated: `3587` - // Minimum execution time: 18_536_000 picoseconds. - Weight::from_parts(19_118_000, 3587) + // Minimum execution time: 13_742_000 picoseconds. + Weight::from_parts(14_200_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,10 +859,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `640` + // Measured: `607` // Estimated: `3643` - // Minimum execution time: 38_751_000 picoseconds. - Weight::from_parts(39_570_000, 3643) + // Minimum execution time: 32_931_000 picoseconds. 
+ Weight::from_parts(34_023_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index c9a4432648ea..e2d35fc1699f 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-collective = { workspace = true, default-features = true } pallet-root-testing = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs index 8b31eb2ced85..502f85a3f178 100644 --- a/substrate/frame/utility/src/weights.rs +++ b/substrate/frame/utility/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,10 +70,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 4_830_000 picoseconds. - Weight::from_parts(19_388_813, 3997) - // Standard Error: 2_694 - .saturating_add(Weight::from_parts(4_591_113, 0).saturating_mul(c.into())) + // Minimum execution time: 5_312_000 picoseconds. + Weight::from_parts(2_694_370, 3997) + // Standard Error: 5_055 + .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -84,8 +84,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 10_474_000 picoseconds. - Weight::from_parts(10_896_000, 3997) + // Minimum execution time: 9_263_000 picoseconds. + Weight::from_parts(9_639_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -97,18 +97,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 4_773_000 picoseconds. - Weight::from_parts(22_628_420, 3997) - // Standard Error: 2_405 - .saturating_add(Weight::from_parts(4_797_007, 0).saturating_mul(c.into())) + // Minimum execution time: 5_120_000 picoseconds. + Weight::from_parts(12_948_874, 3997) + // Standard Error: 4_643 + .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_668_000 picoseconds. - Weight::from_parts(6_985_000, 0) + // Minimum execution time: 7_126_000 picoseconds. 
+ Weight::from_parts(7_452_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -119,10 +119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(23_270_604, 3997) - // Standard Error: 2_511 - .saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into())) + // Minimum execution time: 5_254_000 picoseconds. + Weight::from_parts(4_879_712, 3997) + // Standard Error: 4_988 + .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -138,10 +138,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 4_830_000 picoseconds. - Weight::from_parts(19_388_813, 3997) - // Standard Error: 2_694 - .saturating_add(Weight::from_parts(4_591_113, 0).saturating_mul(c.into())) + // Minimum execution time: 5_312_000 picoseconds. + Weight::from_parts(2_694_370, 3997) + // Standard Error: 5_055 + .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -152,8 +152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 10_474_000 picoseconds. - Weight::from_parts(10_896_000, 3997) + // Minimum execution time: 9_263_000 picoseconds. + Weight::from_parts(9_639_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -165,18 +165,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 4_773_000 picoseconds. 
- Weight::from_parts(22_628_420, 3997) - // Standard Error: 2_405 - .saturating_add(Weight::from_parts(4_797_007, 0).saturating_mul(c.into())) + // Minimum execution time: 5_120_000 picoseconds. + Weight::from_parts(12_948_874, 3997) + // Standard Error: 4_643 + .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_668_000 picoseconds. - Weight::from_parts(6_985_000, 0) + // Minimum execution time: 7_126_000 picoseconds. + Weight::from_parts(7_452_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -187,10 +187,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(23_270_604, 3997) - // Standard Error: 2_511 - .saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into())) + // Minimum execution time: 5_254_000 picoseconds. 
+ Weight::from_parts(4_879_712, 3997) + // Standard Error: 4_988 + .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/verify-signature/Cargo.toml b/substrate/frame/verify-signature/Cargo.toml index 37cc6c0b3065..3c5fd5e65157 100644 --- a/substrate/frame/verify-signature/Cargo.toml +++ b/substrate/frame/verify-signature/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } @@ -28,8 +28,8 @@ sp-weights = { features = ["serde"], workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-collective = { workspace = true, default-features = true } pallet-root-testing = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/frame/verify-signature/src/benchmarking.rs b/substrate/frame/verify-signature/src/benchmarking.rs index 99e893e6f6ab..2b592a4023ec 100644 --- a/substrate/frame/verify-signature/src/benchmarking.rs +++ b/substrate/frame/verify-signature/src/benchmarking.rs @@ -27,16 +27,10 @@ use super::*; use crate::{extension::VerifySignature, Config, Pallet as VerifySignaturePallet}; use alloc::vec; use frame_benchmarking::{v2::*, BenchmarkError}; -use frame_support::{ - dispatch::{DispatchInfo, GetDispatchInfo}, - pallet_prelude::TransactionSource, -}; +use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use 
frame_system::{Call as SystemCall, RawOrigin}; use sp_io::hashing::blake2_256; -use sp_runtime::{ - generic::ExtensionVersion, - traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable}, -}; +use sp_runtime::traits::{AsTransactionAuthorizedOrigin, Dispatchable, TransactionExtension}; pub trait BenchmarkHelper { fn create_signature(entropy: &[u8], msg: &[u8]) -> (Signature, Signer); @@ -54,24 +48,14 @@ mod benchmarks { fn verify_signature() -> Result<(), BenchmarkError> { let entropy = [42u8; 256]; let call: T::RuntimeCall = SystemCall::remark { remark: vec![] }.into(); - let ext_version: ExtensionVersion = 0; let info = call.get_dispatch_info(); - let msg = (ext_version, &call).using_encoded(blake2_256).to_vec(); + let msg = call.using_encoded(blake2_256).to_vec(); let (signature, signer) = T::BenchmarkHelper::create_signature(&entropy, &msg[..]); let ext = VerifySignature::::new_with_signature(signature, signer); #[block] { - assert!(ext - .validate_only( - RawOrigin::None.into(), - &call, - &info, - 0, - TransactionSource::External, - ext_version - ) - .is_ok()); + assert!(ext.validate(RawOrigin::None.into(), &call, &info, 0, (), &call).is_ok()); } Ok(()) diff --git a/substrate/frame/verify-signature/src/extension.rs b/substrate/frame/verify-signature/src/extension.rs index d48991e7a1da..4490a0a600bb 100644 --- a/substrate/frame/verify-signature/src/extension.rs +++ b/substrate/frame/verify-signature/src/extension.rs @@ -20,7 +20,7 @@ use crate::{Config, WeightInfo}; use codec::{Decode, Encode}; -use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait}; +use frame_support::traits::OriginTrait; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ @@ -113,7 +113,6 @@ where _len: usize, _: (), inherited_implication: &impl Encode, - _source: TransactionSource, ) -> Result< (ValidTransaction, Self::Val, ::RuntimeOrigin), TransactionValidityError, diff --git a/substrate/frame/verify-signature/src/tests.rs 
b/substrate/frame/verify-signature/src/tests.rs index 63a310506eec..3e4c8db12fe2 100644 --- a/substrate/frame/verify-signature/src/tests.rs +++ b/substrate/frame/verify-signature/src/tests.rs @@ -25,13 +25,12 @@ use extension::VerifySignature; use frame_support::{ derive_impl, dispatch::GetDispatchInfo, - pallet_prelude::{InvalidTransaction, TransactionSource, TransactionValidityError}, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, traits::OriginTrait, }; use frame_system::Call as SystemCall; use sp_io::hashing::blake2_256; use sp_runtime::{ - generic::ExtensionVersion, testing::{TestSignature, UintAuthorityId}, traits::DispatchTransaction, }; @@ -81,32 +80,15 @@ pub fn new_test_ext() -> sp_io::TestExternalities { fn verification_works() { let who = 0; let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); - let ext_version: ExtensionVersion = 0; - let sig = TestSignature(0, (ext_version, &call).using_encoded(blake2_256).to_vec()); + let sig = TestSignature(0, call.using_encoded(blake2_256).to_vec()); let info = call.get_dispatch_info(); let (_, _, origin) = VerifySignature::::new_with_signature(sig, who) - .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) + .validate_only(None.into(), &call, &info, 0) .unwrap(); assert_eq!(origin.as_signer().unwrap(), &who) } -#[test] -fn bad_inherited_implication() { - let who = 0; - let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); - // Inherited implication should include extension version byte. 
- let sig = TestSignature(0, call.using_encoded(blake2_256).to_vec()); - let info = call.get_dispatch_info(); - - assert_eq!( - VerifySignature::::new_with_signature(sig, who) - .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) - .unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::BadProof) - ); -} - #[test] fn bad_signature() { let who = 0; @@ -116,7 +98,7 @@ fn bad_signature() { assert_eq!( VerifySignature::::new_with_signature(sig, who) - .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) + .validate_only(None.into(), &call, &info, 0) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::BadProof) ); @@ -131,7 +113,7 @@ fn bad_starting_origin() { assert_eq!( VerifySignature::::new_with_signature(sig, who) - .validate_only(Some(42).into(), &call, &info, 0, TransactionSource::External, 0) + .validate_only(Some(42).into(), &call, &info, 0) .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::BadSigner) ); @@ -144,7 +126,7 @@ fn disabled_extension_works() { let info = call.get_dispatch_info(); let (_, _, origin) = VerifySignature::::new_disabled() - .validate_only(Some(who).into(), &call, &info, 0, TransactionSource::External, 0) + .validate_only(Some(who).into(), &call, &info, 0) .unwrap(); assert_eq!(origin.as_signer().unwrap(), &who) } diff --git a/substrate/frame/verify-signature/src/weights.rs b/substrate/frame/verify-signature/src/weights.rs index a8bfa9ea902d..2c1f0f795422 100644 --- a/substrate/frame/verify-signature/src/weights.rs +++ b/substrate/frame/verify-signature/src/weights.rs @@ -18,25 +18,22 @@ //! Autogenerated weights for `pallet_verify_signature` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-09-24, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// ./target/debug/substrate-node // benchmark // pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_verify_signature -// --no-storage-info -// --no-median-slopes -// --no-min-squares +// --steps=2 +// --repeat=2 // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet-verify-signature +// --chain=dev // --output=./substrate/frame/verify-signature/src/weights.rs // --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs @@ -61,8 +58,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_215_000 picoseconds. - Weight::from_parts(46_714_000, 0) + // Minimum execution time: 48_953_000 picoseconds. + Weight::from_parts(49_254_000, 0) } } @@ -72,7 +69,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_215_000 picoseconds. - Weight::from_parts(46_714_000, 0) + // Minimum execution time: 48_953_000 picoseconds. 
+ Weight::from_parts(49_254_000, 0) } } diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 882ce5f81373..f896c3962eaa 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index 3ab161e822e8..efb8cbcc41c4 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,14 +75,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_505_000 picoseconds. 
- Weight::from_parts(39_835_306, 4764) - // Standard Error: 1_394 - .saturating_add(Weight::from_parts(21_450, 0).saturating_mul(l.into())) - // Standard Error: 2_481 - .saturating_add(Weight::from_parts(70_901, 0).saturating_mul(s.into())) + // Minimum execution time: 32_202_000 picoseconds. + Weight::from_parts(31_586_520, 4764) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(67_257, 0).saturating_mul(l.into())) + // Standard Error: 2_693 + .saturating_add(Weight::from_parts(69_725, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -96,14 +96,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_781_000 picoseconds. - Weight::from_parts(40_777_528, 4764) - // Standard Error: 1_209 - .saturating_add(Weight::from_parts(35_116, 0).saturating_mul(l.into())) - // Standard Error: 2_151 - .saturating_add(Weight::from_parts(83_093, 0).saturating_mul(s.into())) + // Minimum execution time: 34_847_000 picoseconds. + Weight::from_parts(34_690_456, 4764) + // Standard Error: 1_681 + .saturating_add(Weight::from_parts(51_103, 0).saturating_mul(l.into())) + // Standard Error: 2_991 + .saturating_add(Weight::from_parts(55_094, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -119,14 +119,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `517 + l * (25 ±0) + s * (36 ±0)` + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_590_000 picoseconds. 
- Weight::from_parts(40_756_231, 4764) - // Standard Error: 1_420 - .saturating_add(Weight::from_parts(45_223, 0).saturating_mul(l.into())) - // Standard Error: 2_527 - .saturating_add(Weight::from_parts(102_603, 0).saturating_mul(s.into())) + // Minimum execution time: 34_027_000 picoseconds. + Weight::from_parts(33_353_168, 4764) + // Standard Error: 1_477 + .saturating_add(Weight::from_parts(72_605, 0).saturating_mul(l.into())) + // Standard Error: 2_629 + .saturating_add(Weight::from_parts(64_115, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -142,14 +142,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `517 + l * (25 ±0) + s * (36 ±0)` + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_490_000 picoseconds. - Weight::from_parts(43_900_384, 4764) - // Standard Error: 1_670 - .saturating_add(Weight::from_parts(31_084, 0).saturating_mul(l.into())) - // Standard Error: 2_971 - .saturating_add(Weight::from_parts(66_673, 0).saturating_mul(s.into())) + // Minimum execution time: 36_816_000 picoseconds. + Weight::from_parts(36_467_447, 4764) + // Standard Error: 1_689 + .saturating_add(Weight::from_parts(51_855, 0).saturating_mul(l.into())) + // Standard Error: 3_006 + .saturating_add(Weight::from_parts(58_233, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -165,14 +165,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `588 + l * (25 ±0) + s * (36 ±0)` + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 76_194_000 picoseconds. 
- Weight::from_parts(77_923_603, 4764) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(50_161, 0).saturating_mul(l.into())) - // Standard Error: 3_810 - .saturating_add(Weight::from_parts(97_415, 0).saturating_mul(s.into())) + // Minimum execution time: 70_906_000 picoseconds. + Weight::from_parts(72_663_428, 4764) + // Standard Error: 2_877 + .saturating_add(Weight::from_parts(81_242, 0).saturating_mul(l.into())) + // Standard Error: 5_118 + .saturating_add(Weight::from_parts(103_344, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -188,14 +188,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `691 + l * (25 ±0) + s * (36 ±0)` + // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 78_333_000 picoseconds. - Weight::from_parts(80_199_350, 6196) - // Standard Error: 1_903 - .saturating_add(Weight::from_parts(46_798, 0).saturating_mul(l.into())) - // Standard Error: 3_385 - .saturating_add(Weight::from_parts(106_311, 0).saturating_mul(s.into())) + // Minimum execution time: 72_730_000 picoseconds. 
+ Weight::from_parts(75_050_411, 6196) + // Standard Error: 2_748 + .saturating_add(Weight::from_parts(73_218, 0).saturating_mul(l.into())) + // Standard Error: 4_889 + .saturating_add(Weight::from_parts(112_868, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -205,20 +205,22 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_102_000 picoseconds. - Weight::from_parts(39_552_301, 4764) - // Standard Error: 1_309 - .saturating_add(Weight::from_parts(37_184, 0).saturating_mul(l.into())) - // Standard Error: 2_418 - .saturating_add(Weight::from_parts(91_621, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 34_698_000 picoseconds. 
+ Weight::from_parts(34_504_324, 4764) + // Standard Error: 1_703 + .saturating_add(Weight::from_parts(56_321, 0).saturating_mul(l.into())) + // Standard Error: 3_145 + .saturating_add(Weight::from_parts(55_503, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -226,20 +228,22 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 42_287_000 picoseconds. - Weight::from_parts(41_937_484, 4764) - // Standard Error: 1_306 - .saturating_add(Weight::from_parts(39_880, 0).saturating_mul(l.into())) - // Standard Error: 2_412 - .saturating_add(Weight::from_parts(85_247, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 36_951_000 picoseconds. 
+ Weight::from_parts(37_020_649, 4764) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(65_437, 0).saturating_mul(l.into())) + // Standard Error: 3_308 + .saturating_add(Weight::from_parts(54_146, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -253,14 +257,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 28]`. fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `588 + l * (25 ±0) + s * (36 ±0)` + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 46_462_000 picoseconds. - Weight::from_parts(46_571_504, 4764) - // Standard Error: 1_298 - .saturating_add(Weight::from_parts(42_091, 0).saturating_mul(l.into())) - // Standard Error: 2_397 - .saturating_add(Weight::from_parts(77_382, 0).saturating_mul(s.into())) + // Minimum execution time: 38_849_000 picoseconds. + Weight::from_parts(38_488_577, 4764) + // Standard Error: 1_911 + .saturating_add(Weight::from_parts(72_338, 0).saturating_mul(l.into())) + // Standard Error: 3_529 + .saturating_add(Weight::from_parts(62_206, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -278,14 +282,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 39_505_000 picoseconds. 
- Weight::from_parts(39_835_306, 4764) - // Standard Error: 1_394 - .saturating_add(Weight::from_parts(21_450, 0).saturating_mul(l.into())) - // Standard Error: 2_481 - .saturating_add(Weight::from_parts(70_901, 0).saturating_mul(s.into())) + // Minimum execution time: 32_202_000 picoseconds. + Weight::from_parts(31_586_520, 4764) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(67_257, 0).saturating_mul(l.into())) + // Standard Error: 2_693 + .saturating_add(Weight::from_parts(69_725, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -299,14 +303,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_781_000 picoseconds. - Weight::from_parts(40_777_528, 4764) - // Standard Error: 1_209 - .saturating_add(Weight::from_parts(35_116, 0).saturating_mul(l.into())) - // Standard Error: 2_151 - .saturating_add(Weight::from_parts(83_093, 0).saturating_mul(s.into())) + // Minimum execution time: 34_847_000 picoseconds. + Weight::from_parts(34_690_456, 4764) + // Standard Error: 1_681 + .saturating_add(Weight::from_parts(51_103, 0).saturating_mul(l.into())) + // Standard Error: 2_991 + .saturating_add(Weight::from_parts(55_094, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -322,14 +326,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `517 + l * (25 ±0) + s * (36 ±0)` + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_590_000 picoseconds. 
- Weight::from_parts(40_756_231, 4764) - // Standard Error: 1_420 - .saturating_add(Weight::from_parts(45_223, 0).saturating_mul(l.into())) - // Standard Error: 2_527 - .saturating_add(Weight::from_parts(102_603, 0).saturating_mul(s.into())) + // Minimum execution time: 34_027_000 picoseconds. + Weight::from_parts(33_353_168, 4764) + // Standard Error: 1_477 + .saturating_add(Weight::from_parts(72_605, 0).saturating_mul(l.into())) + // Standard Error: 2_629 + .saturating_add(Weight::from_parts(64_115, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -345,14 +349,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `517 + l * (25 ±0) + s * (36 ±0)` + // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 43_490_000 picoseconds. - Weight::from_parts(43_900_384, 4764) - // Standard Error: 1_670 - .saturating_add(Weight::from_parts(31_084, 0).saturating_mul(l.into())) - // Standard Error: 2_971 - .saturating_add(Weight::from_parts(66_673, 0).saturating_mul(s.into())) + // Minimum execution time: 36_816_000 picoseconds. + Weight::from_parts(36_467_447, 4764) + // Standard Error: 1_689 + .saturating_add(Weight::from_parts(51_855, 0).saturating_mul(l.into())) + // Standard Error: 3_006 + .saturating_add(Weight::from_parts(58_233, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -368,14 +372,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `588 + l * (25 ±0) + s * (36 ±0)` + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 76_194_000 picoseconds. 
- Weight::from_parts(77_923_603, 4764) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(50_161, 0).saturating_mul(l.into())) - // Standard Error: 3_810 - .saturating_add(Weight::from_parts(97_415, 0).saturating_mul(s.into())) + // Minimum execution time: 70_906_000 picoseconds. + Weight::from_parts(72_663_428, 4764) + // Standard Error: 2_877 + .saturating_add(Weight::from_parts(81_242, 0).saturating_mul(l.into())) + // Standard Error: 5_118 + .saturating_add(Weight::from_parts(103_344, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -391,14 +395,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `691 + l * (25 ±0) + s * (36 ±0)` + // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 78_333_000 picoseconds. - Weight::from_parts(80_199_350, 6196) - // Standard Error: 1_903 - .saturating_add(Weight::from_parts(46_798, 0).saturating_mul(l.into())) - // Standard Error: 3_385 - .saturating_add(Weight::from_parts(106_311, 0).saturating_mul(s.into())) + // Minimum execution time: 72_730_000 picoseconds. 
+ Weight::from_parts(75_050_411, 6196) + // Standard Error: 2_748 + .saturating_add(Weight::from_parts(73_218, 0).saturating_mul(l.into())) + // Standard Error: 4_889 + .saturating_add(Weight::from_parts(112_868, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -408,20 +412,22 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_102_000 picoseconds. - Weight::from_parts(39_552_301, 4764) - // Standard Error: 1_309 - .saturating_add(Weight::from_parts(37_184, 0).saturating_mul(l.into())) - // Standard Error: 2_418 - .saturating_add(Weight::from_parts(91_621, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 34_698_000 picoseconds. 
+ Weight::from_parts(34_504_324, 4764) + // Standard Error: 1_703 + .saturating_add(Weight::from_parts(56_321, 0).saturating_mul(l.into())) + // Standard Error: 3_145 + .saturating_add(Weight::from_parts(55_503, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -429,20 +435,22 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `414 + l * (25 ±0) + s * (36 ±0)` + // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 42_287_000 picoseconds. - Weight::from_parts(41_937_484, 4764) - // Standard Error: 1_306 - .saturating_add(Weight::from_parts(39_880, 0).saturating_mul(l.into())) - // Standard Error: 2_412 - .saturating_add(Weight::from_parts(85_247, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 36_951_000 picoseconds. 
+ Weight::from_parts(37_020_649, 4764) + // Standard Error: 1_791 + .saturating_add(Weight::from_parts(65_437, 0).saturating_mul(l.into())) + // Standard Error: 3_308 + .saturating_add(Weight::from_parts(54_146, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -456,14 +464,14 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 28]`. fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `588 + l * (25 ±0) + s * (36 ±0)` + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 46_462_000 picoseconds. - Weight::from_parts(46_571_504, 4764) - // Standard Error: 1_298 - .saturating_add(Weight::from_parts(42_091, 0).saturating_mul(l.into())) - // Standard Error: 2_397 - .saturating_add(Weight::from_parts(77_382, 0).saturating_mul(s.into())) + // Minimum execution time: 38_849_000 picoseconds. 
+ Weight::from_parts(38_488_577, 4764) + // Standard Error: 1_911 + .saturating_add(Weight::from_parts(72_338, 0).saturating_mul(l.into())) + // Standard Error: 3_529 + .saturating_add(Weight::from_parts(62_206, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 68ecc5d0d78e..a347174ed2eb 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/whitelist/src/weights.rs b/substrate/frame/whitelist/src/weights.rs index 12a18a8f0107..2e28d4fcf7e5 100644 --- a/substrate/frame/whitelist/src/weights.rs +++ b/substrate/frame/whitelist/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_whitelist` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -68,10 +68,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `245` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 18_287_000 picoseconds. - Weight::from_parts(18_733_000, 3556) + // Minimum execution time: 19_521_000 picoseconds. + Weight::from_parts(20_136_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -83,10 +83,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `374` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_887_000 picoseconds. - Weight::from_parts(23_352_000, 3556) + // Minimum execution time: 18_530_000 picoseconds. + Weight::from_parts(19_004_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,12 +101,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `450 + n * (1 ±0)` - // Estimated: `3914 + n * (1 ±0)` - // Minimum execution time: 33_692_000 picoseconds. - Weight::from_parts(34_105_000, 3914) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(n.into())) + // Measured: `522 + n * (1 ±0)` + // Estimated: `3986 + n * (1 ±0)` + // Minimum execution time: 29_721_000 picoseconds. 
+ Weight::from_parts(30_140_000, 3986) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_179, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -120,12 +120,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `374` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 26_380_000 picoseconds. - Weight::from_parts(27_186_471, 3556) + // Minimum execution time: 22_608_000 picoseconds. + Weight::from_parts(23_682_511, 3556) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_420, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -141,10 +141,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `245` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 18_287_000 picoseconds. - Weight::from_parts(18_733_000, 3556) + // Minimum execution time: 19_521_000 picoseconds. + Weight::from_parts(20_136_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -156,10 +156,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `374` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_887_000 picoseconds. 
- Weight::from_parts(23_352_000, 3556) + // Minimum execution time: 18_530_000 picoseconds. + Weight::from_parts(19_004_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -174,12 +174,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `450 + n * (1 ±0)` - // Estimated: `3914 + n * (1 ±0)` - // Minimum execution time: 33_692_000 picoseconds. - Weight::from_parts(34_105_000, 3914) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(n.into())) + // Measured: `522 + n * (1 ±0)` + // Estimated: `3986 + n * (1 ±0)` + // Minimum execution time: 29_721_000 picoseconds. + Weight::from_parts(30_140_000, 3986) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_179, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -193,12 +193,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `374` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 26_380_000 picoseconds. - Weight::from_parts(27_186_471, 3556) + // Minimum execution time: 22_608_000 picoseconds. 
+ Weight::from_parts(23_682_511, 3556) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_420, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 7295adbc11ca..e0a4d06b2d81 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -17,22 +17,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -docify = { workspace = true } -hash-db = { optional = true, workspace = true, default-features = true } -log = { workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } sp-api-proc-macro = { workspace = true } sp-core = { workspace = true } -sp-externalities = { optional = true, workspace = true } -sp-metadata-ir = { optional = true, workspace = true } sp-runtime = { workspace = true } sp-runtime-interface = { workspace = true } +sp-externalities = { optional = true, workspace = true } +sp-version = { workspace = true } sp-state-machine = { optional = true, workspace = true } sp-trie = { optional = true, workspace = true } -sp-version = { workspace = true } +hash-db = { optional = true, workspace = true, default-features = true } thiserror = { optional = true, workspace = true } +scale-info = { features = [ + "derive", +], workspace = true } +sp-metadata-ir = { optional = true, workspace = true } +log = { workspace = true } +docify = { workspace = true } [dev-dependencies] sp-test-primitives = { workspace = true } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 2f414597fb74..191578f432ad 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] 
proc-macro = true [dependencies] -Inflector = { workspace = true } -blake2 = { workspace = true } -expander = { workspace = true } -proc-macro-crate = { workspace = true } -proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit", "visit-mut"], workspace = true } +proc-macro2 = { workspace = true } +blake2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } +Inflector = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 1706f8ca6fbb..6be396339259 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -298,14 +298,18 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } + #[doc(hidden)] + impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 9b02cf125eae..1d21f23eb804 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -15,19 +15,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { workspace = true, default-features = true } -rustversion = { workspace = true } -sc-block-builder = { workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-metadata-ir = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-version = { workspace = true, default-features = true } +sp-tracing = { workspace = 
true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } trybuild = { workspace = true } +rustversion = { workspace = true } +scale-info = { features = ["derive"], workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } @@ -41,5 +40,5 @@ name = "bench" harness = false [features] -enable-staging-api = [] +"enable-staging-api" = [] disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 2e5a078cb382..890cf6eccdbc 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -309,8 +309,6 @@ fn mock_runtime_api_works_with_advanced() { #[test] fn runtime_api_metadata_matches_version_implemented() { - use sp_metadata_ir::InternalImplRuntimeApis; - let rt = Runtime {}; let runtime_metadata = rt.runtime_metadata(); diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 0470b8b72aa0..5a524d1c7f4d 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -99,8 +99,8 @@ fn record_proof_works() { let transaction = Transfer { amount: 1000, nonce: 0, - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), } .into_unchecked_extrinsic(); diff --git a/substrate/primitives/application-crypto/Cargo.toml 
b/substrate/primitives/application-crypto/Cargo.toml index 9589cce042f5..1161d43ded5a 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] +sp-core = { workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } [features] diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 77b82fbe6468..485656bf30bb 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -21,18 +21,18 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -docify = { workspace = true } integer-sqrt = { workspace = true } num-traits = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = { workspace = true, default-features = true } +docify = { workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } primitive-types = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index aed09a684bda..93158274d98f 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -21,11 +21,11 @@ codec = { features = ["derive"], workspace = true } futures = { workspace = true } parking_lot = { workspace = true, default-features = true } schnellru = { 
workspace = true } +thiserror = { workspace = true } sp-api = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } sp-database = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -thiserror = { workspace = true } tracing = { workspace = true, default-features = true } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 572e46d8de8d..13d80683c853 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -23,9 +23,9 @@ sp-application-crypto = { workspace = true } sp-core = { workspace = true } sp-crypto-hashing = { workspace = true } sp-io = { workspace = true } -sp-keystore = { workspace = true } sp-mmr-primitives = { workspace = true } sp-runtime = { workspace = true } +sp-keystore = { workspace = true } sp-weights = { workspace = true } strum = { features = ["derive"], workspace = true } diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 3a6ffd031ec5..764ef1d97346 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,11 +20,11 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } +thiserror = { workspace = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -thiserror = { workspace = true } 
[dev-dependencies] futures = { workspace = true } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 0ea885abd22d..f6bc17bccaca 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,47 +16,47 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bounded-collections = { workspace = true } -bs58 = { optional = true, workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -hash-db = { workspace = true } -hash256-std-hasher = { workspace = true } -impl-serde = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } +bounded-collections = { workspace = true } primitive-types = { features = ["codec", "scale-info"], workspace = true } +impl-serde = { optional = true, workspace = true } +hash-db = { workspace = true } +hash256-std-hasher = { workspace = true } +bs58 = { optional = true, workspace = true } rand = { features = [ "small_rng", ], optional = true, workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, features = ["alloc", "derive"], workspace = true } substrate-bip39 = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = [ "alloc", ] } -bitflags = { workspace = true } -dyn-clonable = { optional = true, workspace = true } -futures = { optional = true, workspace = true } -itertools = { optional = true, workspace = true } -parking_lot = { optional = true, workspace = true, default-features = true } -paste = { workspace = true, default-features = true } +zeroize = { workspace = true } secrecy = { features = ["alloc"], workspace = true } -sp-debug-derive = { 
workspace = true } -sp-externalities = { optional = true, workspace = true } +parking_lot = { optional = true, workspace = true, default-features = true } +ss58-registry = { workspace = true } sp-std = { workspace = true } +sp-debug-derive = { workspace = true } sp-storage = { workspace = true } -ss58-registry = { workspace = true } +sp-externalities = { optional = true, workspace = true } +futures = { optional = true, workspace = true } +dyn-clonable = { optional = true, workspace = true } thiserror = { optional = true, workspace = true } tracing = { optional = true, workspace = true, default-features = true } -zeroize = { workspace = true } +bitflags = { workspace = true } +paste = { workspace = true, default-features = true } +itertools = { optional = true, workspace = true } # full crypto array-bytes = { workspace = true, default-features = true } -blake2 = { optional = true, workspace = true } ed25519-zebra = { workspace = true } +blake2 = { optional = true, workspace = true } libsecp256k1 = { features = ["static-context"], workspace = true } -merlin = { workspace = true } schnorrkel = { features = ["preaudit_deprecated"], workspace = true } +merlin = { workspace = true } sp-crypto-hashing = { workspace = true } sp-runtime-interface = { workspace = true } # k256 crate, better portability, intended to be used in substrate-runtimes (no-std) @@ -76,8 +76,8 @@ bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "0fef826", [dev-dependencies] criterion = { workspace = true, default-features = true } -regex = { workspace = true } serde_json = { workspace = true, default-features = true } +regex = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 454f61df7941..bb05bebc6274 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -101,9 +101,8 @@ pub use bounded_collections as bounded; #[cfg(feature = "std")] pub use 
bounded_collections::{bounded_btree_map, bounded_vec}; pub use bounded_collections::{ - parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, - ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, - TypedGet, + parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, + ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, }; pub use sp_storage as storage; diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 1e5964f85575..29e30133ebea 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-bls12-377 = { features = ["curve"], optional = true, workspace = true } +ark-ec = { optional = true, workspace = true } ark-bls12-377-ext = { optional = true, workspace = true } -ark-bls12-381 = { features = ["curve"], optional = true, workspace = true } +ark-bls12-377 = { features = ["curve"], optional = true, workspace = true } ark-bls12-381-ext = { optional = true, workspace = true } -ark-bw6-761 = { optional = true, workspace = true } +ark-bls12-381 = { features = ["curve"], optional = true, workspace = true } ark-bw6-761-ext = { optional = true, workspace = true } -ark-ec = { optional = true, workspace = true } -ark-ed-on-bls12-377 = { optional = true, workspace = true } -ark-ed-on-bls12-377-ext = { optional = true, workspace = true } -ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } +ark-bw6-761 = { optional = true, workspace = true } ark-ed-on-bls12-381-bandersnatch-ext = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } +ark-ed-on-bls12-377-ext = { optional = true, workspace = true } +ark-ed-on-bls12-377 = { optional = true, workspace = true } 
ark-scale = { features = ["hazmat"], optional = true, workspace = true } sp-runtime-interface = { optional = true, workspace = true } diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index e09661d41c11..6f974a3e2c8a 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -20,5 +20,5 @@ proc-macro = true [dependencies] quote = { workspace = true } -sp-crypto-hashing = { workspace = true } syn = { features = ["full", "parsing"], workspace = true } +sp-crypto-hashing = { workspace = true } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index a26cbbf62ada..4979b89155ab 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -19,9 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } +proc-macro2 = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index f1fa60d023be..285b214907ad 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -19,9 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["bytes"], workspace = true } scale-info = { features = ["derive"], workspace = true } -serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } +serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 19966919047f..271308c9cbf1 100644 --- 
a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -impl-trait-for-tuples = { workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-runtime = { optional = true, workspace = true } +impl-trait-for-tuples = { workspace = true } thiserror = { optional = true, workspace = true } +sp-runtime = { optional = true, workspace = true } [dev-dependencies] futures = { workspace = true } diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index b0c99002910b..97940759a987 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -22,20 +22,20 @@ bytes = { workspace = true } codec = { features = [ "bytes", ], workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-keystore = { optional = true, workspace = true } libsecp256k1 = { optional = true, workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +sp-runtime-interface = { workspace = true } +sp-trie = { optional = true, workspace = true } +sp-externalities = { workspace = true } +sp-tracing = { workspace = true } log = { optional = true, workspace = true, default-features = true } secp256k1 = { features = [ "global-context", "recovery", ], optional = true, workspace = true, default-features = true } -sp-core = { workspace = true } -sp-crypto-hashing = { workspace = true } -sp-externalities = { workspace = true } -sp-keystore = { optional = true, workspace = true } -sp-runtime-interface = { workspace = true } -sp-state-machine = { optional = true, workspace = true } -sp-tracing = { workspace = true } -sp-trie = { optional = true, workspace = true } tracing = { workspace = true } tracing-core = { workspace = true } diff --git 
a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 9ffcf50c7b45..27f7304a9358 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +strum = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } -strum = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index 36e77dabd601..008c01b150f0 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -32,11 +32,20 @@ pub mod ed25519; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; +/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, since it tends to be +/// used for accounts (although it may also be used by authorities). +pub use sr25519::Keyring as AccountKeyring; + #[cfg(feature = "bandersnatch-experimental")] pub use bandersnatch::Keyring as BandersnatchKeyring; pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; +pub mod test { + /// The keyring for use with accounts when using the test runtime. + pub use super::ed25519::Keyring as AccountKeyring; +} + #[derive(Debug)] /// Represents an error that occurs when parsing a string into a `KeyRing`. 
pub struct ParseKeyringError; diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 5f861ca7acf1..6f944a3f6a8d 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false } -scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 046441104b88..d7786347dd02 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-metadata = { features = ["current", "unstable"], workspace = true } +frame-metadata = { features = ["current"], workspace = true } scale-info = { features = ["derive"], workspace = true } [features] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index dc01f7eaadb3..4bd13b935afd 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -30,7 +30,6 @@ mod types; use frame_metadata::RuntimeMetadataPrefixed; pub use types::*; -mod unstable; mod v14; mod v15; @@ -40,33 +39,23 @@ const V14: u32 = 14; /// Metadata V15. const V15: u32 = 15; -/// Unstable metadata V16. -const UNSTABLE_V16: u32 = u32::MAX; - /// Transform the IR to the specified version. /// /// Use [`supported_versions`] to find supported versions. 
pub fn into_version(metadata: MetadataIR, version: u32) -> Option { // Note: Unstable metadata version is `u32::MAX` until stabilized. match version { - // Version V14. This needs to be around until the - // deprecation of the `Metadata_metadata` runtime call in favor of - // `Metadata_metadata_at_version. + // Latest stable version. V14 => Some(into_v14(metadata)), - - // Version V15 - latest stable. + // Unstable metadata. V15 => Some(into_latest(metadata)), - - // Unstable metadata under `u32::MAX`. - UNSTABLE_V16 => Some(into_unstable(metadata)), - _ => None, } } /// Returns the supported metadata versions. pub fn supported_versions() -> alloc::vec::Vec { - alloc::vec![V14, V15, UNSTABLE_V16] + alloc::vec![V14, V15] } /// Transform the IR to the latest stable metadata version. @@ -81,22 +70,6 @@ pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } -/// Transform the IR to unstable metadata version 16. -pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { - let latest: frame_metadata::v16::RuntimeMetadataV16 = metadata.into(); - latest.into() -} - -/// INTERNAL USE ONLY -/// -/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to -/// fetch the runtime api metadata without exploding when there is no runtime api implementation -/// available. 
-#[doc(hidden)] -pub trait InternalImplRuntimeApis { - fn runtime_metadata(&self) -> alloc::vec::Vec; -} - #[cfg(test)] mod test { use super::*; @@ -108,7 +81,7 @@ mod test { pallets: vec![], extrinsic: ExtrinsicMetadataIR { ty: meta_type::<()>(), - versions: vec![0], + version: 0, address_ty: meta_type::<()>(), call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index af217ffe16ee..199b692fbd8c 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -170,8 +170,8 @@ pub struct ExtrinsicMetadataIR { /// /// Note: Field used for metadata V14 only. pub ty: T::Type, - /// Extrinsic versions. - pub versions: Vec, + /// Extrinsic version. + pub version: u8, /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. @@ -191,7 +191,7 @@ impl IntoPortable for ExtrinsicMetadataIR { fn into_portable(self, registry: &mut Registry) -> Self::Output { ExtrinsicMetadataIR { ty: registry.register_type(&self.ty), - versions: self.versions, + version: self.version, address_ty: registry.register_type(&self.address_ty), call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs deleted file mode 100644 index d46ce3ec6a7d..000000000000 --- a/substrate/primitives/metadata-ir/src/unstable.rs +++ /dev/null @@ -1,211 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Convert the IR to V16 metadata. - -use crate::{ - DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR, - PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, - PalletStorageMetadataIR, StorageEntryMetadataIR, -}; - -use super::types::{ - ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, - RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, -}; - -use frame_metadata::v16::{ - CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums, - PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, - PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, - RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16, - StorageEntryMetadata, TransactionExtensionMetadata, -}; - -impl From for RuntimeMetadataV16 { - fn from(ir: MetadataIR) -> Self { - RuntimeMetadataV16::new( - ir.pallets.into_iter().map(Into::into).collect(), - ir.extrinsic.into(), - ir.apis.into_iter().map(Into::into).collect(), - ir.outer_enums.into(), - // Substrate does not collect yet the custom metadata fields. - // This allows us to extend the V16 easily. 
- CustomMetadata { map: Default::default() }, - ) - } -} - -impl From for RuntimeApiMetadata { - fn from(ir: RuntimeApiMetadataIR) -> Self { - RuntimeApiMetadata { - name: ir.name, - methods: ir.methods.into_iter().map(Into::into).collect(), - docs: ir.docs, - deprecation_info: ir.deprecation_info.into(), - } - } -} - -impl From for RuntimeApiMethodMetadata { - fn from(ir: RuntimeApiMethodMetadataIR) -> Self { - RuntimeApiMethodMetadata { - name: ir.name, - inputs: ir.inputs.into_iter().map(Into::into).collect(), - output: ir.output, - docs: ir.docs, - deprecation_info: ir.deprecation_info.into(), - } - } -} - -impl From for RuntimeApiMethodParamMetadata { - fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self { - RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty } - } -} - -impl From for PalletMetadata { - fn from(ir: PalletMetadataIR) -> Self { - PalletMetadata { - name: ir.name, - storage: ir.storage.map(Into::into), - calls: ir.calls.map(Into::into), - event: ir.event.map(Into::into), - constants: ir.constants.into_iter().map(Into::into).collect(), - error: ir.error.map(Into::into), - index: ir.index, - docs: ir.docs, - associated_types: ir.associated_types.into_iter().map(Into::into).collect(), - deprecation_info: ir.deprecation_info.into(), - } - } -} - -impl From for PalletStorageMetadata { - fn from(ir: PalletStorageMetadataIR) -> Self { - PalletStorageMetadata { - prefix: ir.prefix, - entries: ir.entries.into_iter().map(Into::into).collect(), - } - } -} - -impl From for StorageEntryMetadata { - fn from(ir: StorageEntryMetadataIR) -> Self { - StorageEntryMetadata { - name: ir.name, - modifier: ir.modifier.into(), - ty: ir.ty.into(), - default: ir.default, - docs: ir.docs, - deprecation_info: ir.deprecation_info.into(), - } - } -} - -impl From for PalletAssociatedTypeMetadata { - fn from(ir: PalletAssociatedTypeMetadataIR) -> Self { - PalletAssociatedTypeMetadata { name: ir.name, ty: ir.ty, docs: ir.docs } - } -} - -impl From for PalletErrorMetadata { 
- fn from(ir: PalletErrorMetadataIR) -> Self { - PalletErrorMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } - } -} - -impl From for PalletEventMetadata { - fn from(ir: PalletEventMetadataIR) -> Self { - PalletEventMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } - } -} - -impl From for PalletCallMetadata { - fn from(ir: PalletCallMetadataIR) -> Self { - PalletCallMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } - } -} - -impl From for PalletConstantMetadata { - fn from(ir: PalletConstantMetadataIR) -> Self { - PalletConstantMetadata { - name: ir.name, - ty: ir.ty, - value: ir.value, - docs: ir.docs, - deprecation_info: ir.deprecation_info.into(), - } - } -} - -impl From for TransactionExtensionMetadata { - fn from(ir: TransactionExtensionMetadataIR) -> Self { - TransactionExtensionMetadata { identifier: ir.identifier, ty: ir.ty, implicit: ir.implicit } - } -} - -impl From for ExtrinsicMetadata { - fn from(ir: ExtrinsicMetadataIR) -> Self { - // Assume version 0 for all extensions. 
- let indexes = (0..ir.extensions.len()).map(|index| index as u32).collect(); - let transaction_extensions_by_version = [(0, indexes)].iter().cloned().collect(); - - ExtrinsicMetadata { - versions: ir.versions, - address_ty: ir.address_ty, - signature_ty: ir.signature_ty, - transaction_extensions_by_version, - transaction_extensions: ir.extensions.into_iter().map(Into::into).collect(), - } - } -} - -impl From for OuterEnums { - fn from(ir: OuterEnumsIR) -> Self { - OuterEnums { - call_enum_ty: ir.call_enum_ty, - event_enum_ty: ir.event_enum_ty, - error_enum_ty: ir.error_enum_ty, - } - } -} - -impl From for DeprecationStatus { - fn from(ir: DeprecationStatusIR) -> Self { - match ir { - DeprecationStatusIR::NotDeprecated => DeprecationStatus::NotDeprecated, - DeprecationStatusIR::DeprecatedWithoutNote => DeprecationStatus::DeprecatedWithoutNote, - DeprecationStatusIR::Deprecated { since, note } => - DeprecationStatus::Deprecated { since, note }, - } - } -} - -impl From for DeprecationInfo { - fn from(ir: DeprecationInfoIR) -> Self { - match ir { - DeprecationInfoIR::NotDeprecated => DeprecationInfo::NotDeprecated, - DeprecationInfoIR::ItemDeprecated(status) => - DeprecationInfo::ItemDeprecated(status.into()), - DeprecationInfoIR::VariantsDeprecated(btree) => DeprecationInfo::VariantsDeprecated( - btree.into_iter().map(|(key, value)| (key.0, value.into())).collect(), - ), - } - } -} diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index f3cb5973f5bd..70e84532add9 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -149,12 +149,9 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { - let lowest_supported_version = - ir.versions.iter().min().expect("Metadata V14 supports one version; qed"); - ExtrinsicMetadata { ty: ir.ty, - version: *lowest_supported_version, + version: ir.version, 
signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index ed315a31e6dc..4b3b6106d27f 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -100,7 +100,7 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { ExtrinsicMetadata { - version: *ir.versions.iter().min().expect("Metadata V15 supports only one version"), + version: ir.version, address_ty: ir.address_ty, call_ty: ir.call_ty, signature_ty: ir.signature_ty, diff --git a/substrate/primitives/panic-handler/src/lib.rs b/substrate/primitives/panic-handler/src/lib.rs index 81ccaaee828e..c4a7eb8dc67c 100644 --- a/substrate/primitives/panic-handler/src/lib.rs +++ b/substrate/primitives/panic-handler/src/lib.rs @@ -30,7 +30,7 @@ use std::{ cell::Cell, io::{self, Write}, marker::PhantomData, - panic::{self, PanicHookInfo}, + panic::{self, PanicInfo}, sync::LazyLock, thread, }; @@ -149,7 +149,7 @@ fn strip_control_codes(input: &str) -> std::borrow::Cow { } /// Function being called when a panic happens. 
-fn panic_hook(info: &PanicHookInfo, report_url: &str, version: &str) { +fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { let location = info.location(); let file = location.as_ref().map(|l| l.file()).unwrap_or(""); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index 2d82838ca0b3..ee44d90fa959 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -18,26 +18,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { workspace = true } -codec = { features = ["bytes"], workspace = true } -impl-trait-for-tuples = { workspace = true } -primitive-types = { workspace = true } -sp-externalities = { workspace = true } -sp-runtime-interface-proc-macro = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true } sp-std = { workspace = true } -sp-storage = { workspace = true } sp-tracing = { workspace = true } -sp-wasm-interface = { workspace = true } +sp-runtime-interface-proc-macro = { workspace = true, default-features = true } +sp-externalities = { workspace = true } +codec = { features = ["bytes"], workspace = true } static_assertions = { workspace = true, default-features = true } +primitive-types = { workspace = true } +sp-storage = { workspace = true } +impl-trait-for-tuples = { workspace = true } [target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] polkavm-derive = { workspace = true } [dev-dependencies] -rustversion = { workspace = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-runtime-interface-test-wasm = { workspace = true } sp-state-machine = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, 
default-features = true } +rustversion = { workspace = true } trybuild = { workspace = true } [features] diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 2112d5bc0693..3fd5f073f025 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,8 +20,8 @@ proc-macro = true [dependencies] Inflector = { workspace = true } -expander = { workspace = true } proc-macro-crate = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } +expander = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/runtime-interface/test/Cargo.toml b/substrate/primitives/runtime-interface/test/Cargo.toml index ebcf4222bda3..29ef0f6b4892 100644 --- a/substrate/primitives/runtime-interface/test/Cargo.toml +++ b/substrate/primitives/runtime-interface/test/Cargo.toml @@ -15,6 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +tracing = { workspace = true, default-features = true } +tracing-core = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } @@ -23,5 +25,3 @@ sp-runtime-interface = { workspace = true, default-features = true } sp-runtime-interface-test-wasm = { workspace = true } sp-runtime-interface-test-wasm-deprecated = { workspace = true } sp-state-machine = { workspace = true, default-features = true } -tracing = { workspace = true, default-features = true } -tracing-core = { workspace = true, default-features = true } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 89c221d574fc..8a812c3a5772 100644 --- a/substrate/primitives/runtime/Cargo.toml 
+++ b/substrate/primitives/runtime/Cargo.toml @@ -17,9 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -binary-merkle-tree = { workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -docify = { workspace = true } either = { workspace = true } hash256-std-hasher = { workspace = true } impl-trait-for-tuples = { workspace = true } @@ -36,7 +34,9 @@ sp-io = { workspace = true } sp-std = { workspace = true } sp-trie = { workspace = true } sp-weights = { workspace = true } +docify = { workspace = true } tracing = { workspace = true, features = ["log"], default-features = false } +binary-merkle-tree = { workspace = true } simple-mermaid = { version = "0.1.1", optional = true } tuplex = { version = "0.1.2", default-features = false } @@ -44,11 +44,11 @@ tuplex = { version = "0.1.2", default-features = false } [dev-dependencies] rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +zstd = { workspace = true } sp-api = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -zstd = { workspace = true } [features] runtime-benchmarks = [] diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 1842b1631621..e2ecd5ed6da7 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -30,12 +30,6 @@ use crate::{ transaction_validity::{TransactionSource, TransactionValidity}, }; -use super::unchecked_extrinsic::ExtensionVersion; - -/// Default version of the [Extension](TransactionExtension) used to construct the inherited -/// implication for legacy transactions. 
-const DEFAULT_EXTENSION_VERSION: ExtensionVersion = 0; - /// The kind of extrinsic this is, including any fields required of that kind. This is basically /// the full extrinsic except the `Call`. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] @@ -48,7 +42,7 @@ pub enum ExtrinsicFormat { Signed(AccountId, Extension), /// Extrinsic has a default `Origin` of `None` and must pass all `TransactionExtension`s. /// regular checks and includes all extension data. - General(ExtensionVersion, Extension), + General(Extension), } /// Definition of something that the external world might want to say; its existence implies that it @@ -91,20 +85,10 @@ where }, ExtrinsicFormat::Signed(ref signer, ref extension) => { let origin = Some(signer.clone()).into(); - extension - .validate_only( - origin, - &self.function, - info, - len, - source, - DEFAULT_EXTENSION_VERSION, - ) - .map(|x| x.0) + extension.validate_only(origin, &self.function, info, len).map(|x| x.0) }, - ExtrinsicFormat::General(extension_version, ref extension) => extension - .validate_only(None.into(), &self.function, info, len, source, extension_version) - .map(|x| x.0), + ExtrinsicFormat::General(ref extension) => + extension.validate_only(None.into(), &self.function, info, len).map(|x| x.0), } } @@ -127,15 +111,10 @@ where Extension::bare_post_dispatch(info, &mut post_info, len, &pd_res)?; Ok(res) }, - ExtrinsicFormat::Signed(signer, extension) => extension.dispatch_transaction( - Some(signer).into(), - self.function, - info, - len, - DEFAULT_EXTENSION_VERSION, - ), - ExtrinsicFormat::General(extension_version, extension) => extension - .dispatch_transaction(None.into(), self.function, info, len, extension_version), + ExtrinsicFormat::Signed(signer, extension) => + extension.dispatch_transaction(Some(signer).into(), self.function, info, len), + ExtrinsicFormat::General(extension) => + extension.dispatch_transaction(None.into(), self.function, info, len), } } } @@ -148,7 +127,7 @@ impl> pub fn 
extension_weight(&self) -> Weight { match &self.format { ExtrinsicFormat::Bare => Weight::zero(), - ExtrinsicFormat::Signed(_, ext) | ExtrinsicFormat::General(_, ext) => + ExtrinsicFormat::Signed(_, ext) | ExtrinsicFormat::General(ext) => ext.weight(&self.function), } } diff --git a/substrate/primitives/runtime/src/generic/digest.rs b/substrate/primitives/runtime/src/generic/digest.rs index 5ed0c7075cae..c639576a2867 100644 --- a/substrate/primitives/runtime/src/generic/digest.rs +++ b/substrate/primitives/runtime/src/generic/digest.rs @@ -20,7 +20,6 @@ #[cfg(all(not(feature = "std"), feature = "serde"))] use alloc::format; use alloc::vec::Vec; -use codec::DecodeAll; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -257,7 +256,8 @@ impl DigestItem { self.dref().try_as_raw(id) } - /// Returns the data decoded as `T`, if the `id` is matching. + /// Returns the data contained in the item if `Some` if this entry has the id given, decoded + /// to the type provided `T`. pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { self.dref().try_to::(id) } @@ -367,16 +367,17 @@ impl<'a> DigestItemRef<'a> { /// Try to match this digest item to the given opaque item identifier; if it matches, then /// try to cast to the given data type; if that works, return it. pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { - self.try_as_raw(id).and_then(|mut x| DecodeAll::decode_all(&mut x).ok()) + self.try_as_raw(id).and_then(|mut x| Decode::decode(&mut x).ok()) } /// Try to match this to a `Self::Seal`, check `id` matches and decode it. /// /// Returns `None` if this isn't a seal item, the `id` doesn't match or when the decoding fails. 
pub fn seal_try_to(&self, id: &ConsensusEngineId) -> Option { - self.as_seal() - .filter(|s| s.0 == *id) - .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) + match self { + Self::Seal(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } } /// Try to match this to a `Self::Consensus`, check `id` matches and decode it. @@ -384,9 +385,10 @@ impl<'a> DigestItemRef<'a> { /// Returns `None` if this isn't a consensus item, the `id` doesn't match or /// when the decoding fails. pub fn consensus_try_to(&self, id: &ConsensusEngineId) -> Option { - self.as_consensus() - .filter(|s| s.0 == *id) - .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) + match self { + Self::Consensus(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } } /// Try to match this to a `Self::PreRuntime`, check `id` matches and decode it. @@ -394,21 +396,40 @@ impl<'a> DigestItemRef<'a> { /// Returns `None` if this isn't a pre-runtime item, the `id` doesn't match or /// when the decoding fails. 
pub fn pre_runtime_try_to(&self, id: &ConsensusEngineId) -> Option { - self.as_pre_runtime() - .filter(|s| s.0 == *id) - .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) + match self { + Self::PreRuntime(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } } } impl<'a> Encode for DigestItemRef<'a> { fn encode(&self) -> Vec { + let mut v = Vec::new(); + match *self { - Self::Consensus(val, data) => (DigestItemType::Consensus, val, data).encode(), - Self::Seal(val, sig) => (DigestItemType::Seal, val, sig).encode(), - Self::PreRuntime(val, data) => (DigestItemType::PreRuntime, val, data).encode(), - Self::Other(val) => (DigestItemType::Other, val).encode(), - Self::RuntimeEnvironmentUpdated => DigestItemType::RuntimeEnvironmentUpdated.encode(), + Self::Consensus(val, data) => { + DigestItemType::Consensus.encode_to(&mut v); + (val, data).encode_to(&mut v); + }, + Self::Seal(val, sig) => { + DigestItemType::Seal.encode_to(&mut v); + (val, sig).encode_to(&mut v); + }, + Self::PreRuntime(val, data) => { + DigestItemType::PreRuntime.encode_to(&mut v); + (val, data).encode_to(&mut v); + }, + Self::Other(val) => { + DigestItemType::Other.encode_to(&mut v); + val.encode_to(&mut v); + }, + Self::RuntimeEnvironmentUpdated => { + DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); + }, } + + v } } diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index f79058e270ed..007dee2684b0 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -33,8 +33,6 @@ pub use self::{ digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, - unchecked_extrinsic::{ - ExtensionVersion, Preamble, SignedPayload, UncheckedExtrinsic, EXTRINSIC_FORMAT_VERSION, - }, + unchecked_extrinsic::{Preamble, SignedPayload, UncheckedExtrinsic, EXTRINSIC_FORMAT_VERSION}, }; pub use 
unchecked_extrinsic::UncheckedSignaturePayload; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index d8510a60a789..8c44e147f90b 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -83,7 +83,7 @@ pub enum Preamble { Bare(ExtrinsicVersion), /// An old-school transaction extrinsic which includes a signature of some hard-coded crypto. /// Available only on extrinsic version 4. - Signed(Address, Signature, Extension), + Signed(Address, Signature, ExtensionVersion, Extension), /// A new-school transaction extrinsic which does not include a signature by default. The /// origin authorization, through signatures or other means, is performed by the transaction /// extension in this extrinsic. Available starting with extrinsic version 5. @@ -117,7 +117,7 @@ where let address = Address::decode(input)?; let signature = Signature::decode(input)?; let ext = Extension::decode(input)?; - Self::Signed(address, signature, ext) + Self::Signed(address, signature, 0, ext) }, (EXTRINSIC_FORMAT_VERSION, GENERAL_EXTRINSIC) => { let ext_version = ExtensionVersion::decode(input)?; @@ -140,7 +140,7 @@ where fn size_hint(&self) -> usize { match &self { Preamble::Bare(_) => EXTRINSIC_FORMAT_VERSION.size_hint(), - Preamble::Signed(address, signature, ext) => LEGACY_EXTRINSIC_FORMAT_VERSION + Preamble::Signed(address, signature, _, ext) => LEGACY_EXTRINSIC_FORMAT_VERSION .size_hint() .saturating_add(address.size_hint()) .saturating_add(signature.size_hint()) @@ -157,7 +157,7 @@ where Preamble::Bare(extrinsic_version) => { (extrinsic_version | BARE_EXTRINSIC).encode_to(dest); }, - Preamble::Signed(address, signature, ext) => { + Preamble::Signed(address, signature, _, ext) => { (LEGACY_EXTRINSIC_FORMAT_VERSION | SIGNED_EXTRINSIC).encode_to(dest); address.encode_to(dest); signature.encode_to(dest); @@ -176,7 
+176,7 @@ impl Preamble { /// Returns `Some` if this is a signed extrinsic, together with the relevant inner fields. pub fn to_signed(self) -> Option<(Address, Signature, Extension)> { match self { - Self::Signed(a, s, e) => Some((a, s, e)), + Self::Signed(a, s, _, e) => Some((a, s, e)), _ => None, } } @@ -190,7 +190,8 @@ where fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::Bare(_) => write!(f, "Bare"), - Self::Signed(address, _, tx_ext) => write!(f, "Signed({:?}, {:?})", address, tx_ext), + Self::Signed(address, _, ext_version, tx_ext) => + write!(f, "Signed({:?}, {:?}, {:?})", address, ext_version, tx_ext), Self::General(ext_version, tx_ext) => write!(f, "General({:?}, {:?})", ext_version, tx_ext), } @@ -304,7 +305,7 @@ impl UncheckedExtrinsic Self { - Self { preamble: Preamble::Signed(signed, signature, tx_ext), function } + Self { preamble: Preamble::Signed(signed, signature, 0, tx_ext), function } } /// New instance of an new-school unsigned transaction. @@ -344,7 +345,7 @@ where fn check(self, lookup: &Lookup) -> Result { Ok(match self.preamble { - Preamble::Signed(signed, signature, tx_ext) => { + Preamble::Signed(signed, signature, _, tx_ext) => { let signed = lookup.lookup(signed)?; // The `Implicit` is "implicitly" included in the payload. 
let raw_payload = SignedPayload::new(self.function, tx_ext)?; @@ -354,8 +355,8 @@ where let (function, tx_ext, _) = raw_payload.deconstruct(); CheckedExtrinsic { format: ExtrinsicFormat::Signed(signed, tx_ext), function } }, - Preamble::General(extension_version, tx_ext) => CheckedExtrinsic { - format: ExtrinsicFormat::General(extension_version, tx_ext), + Preamble::General(_, tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(tx_ext), function: self.function, }, Preamble::Bare(_) => @@ -369,15 +370,15 @@ where lookup: &Lookup, ) -> Result { Ok(match self.preamble { - Preamble::Signed(signed, _, tx_ext) => { + Preamble::Signed(signed, _, _, extra) => { let signed = lookup.lookup(signed)?; CheckedExtrinsic { - format: ExtrinsicFormat::Signed(signed, tx_ext), + format: ExtrinsicFormat::Signed(signed, extra), function: self.function, } }, - Preamble::General(extension_version, tx_ext) => CheckedExtrinsic { - format: ExtrinsicFormat::General(extension_version, tx_ext), + Preamble::General(_, extra) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extra), function: self.function, }, Preamble::Bare(_) => @@ -389,7 +390,8 @@ where impl> ExtrinsicMetadata for UncheckedExtrinsic { - const VERSIONS: &'static [u8] = &[LEGACY_EXTRINSIC_FORMAT_VERSION, EXTRINSIC_FORMAT_VERSION]; + // TODO: Expose both version 4 and version 5 in metadata v16. 
+ const VERSION: u8 = LEGACY_EXTRINSIC_FORMAT_VERSION; type TransactionExtensions = Extension; } @@ -401,7 +403,8 @@ impl Weight { match &self.preamble { Preamble::Bare(_) => Weight::zero(), - Preamble::Signed(_, _, ext) | Preamble::General(_, ext) => ext.weight(&self.function), + Preamble::Signed(_, _, _, ext) | Preamble::General(_, ext) => + ext.weight(&self.function), } } } @@ -836,7 +839,7 @@ mod tests { assert_eq!( >::check(ux, &Default::default()), Ok(CEx { - format: ExtrinsicFormat::General(0, DummyExtension), + format: ExtrinsicFormat::General(DummyExtension), function: vec![0u8; 0].into() }), ); @@ -912,7 +915,7 @@ mod tests { assert_eq!(decoded_old_ux.function, call); assert_eq!( decoded_old_ux.preamble, - Preamble::Signed(signed, legacy_signature.clone(), extension.clone()) + Preamble::Signed(signed, legacy_signature.clone(), 0, extension.clone()) ); let new_ux = @@ -949,7 +952,7 @@ mod tests { assert_eq!(decoded_old_ux.function, call); assert_eq!( decoded_old_ux.preamble, - Preamble::Signed(signed, signature.clone(), extension.clone()) + Preamble::Signed(signed, signature.clone(), 0, extension.clone()) ); let new_ux = Ex::new_signed(call.clone(), signed, signature.clone(), extension.clone()); diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs index d371152dc40a..e6906cdb3877 100644 --- a/substrate/primitives/runtime/src/traits/mod.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -44,9 +44,8 @@ pub use sp_arithmetic::traits::{ use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId, U256}; #[doc(hidden)] pub use sp_core::{ - parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, - ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, - TypedGet, + parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, + ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, 
TypedGet, }; #[cfg(feature = "std")] use std::fmt::Display; @@ -55,8 +54,7 @@ use std::str::FromStr; pub mod transaction_extension; pub use transaction_extension::{ - DispatchTransaction, Implication, ImplicationParts, TransactionExtension, - TransactionExtensionMetadata, TxBaseImplication, ValidateResult, + DispatchTransaction, TransactionExtension, TransactionExtensionMetadata, ValidateResult, }; /// A lazy value. @@ -1411,10 +1409,10 @@ impl SignaturePayload for () { /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { - /// The format versions of the `Extrinsic`. + /// The format version of the `Extrinsic`. /// - /// By format we mean the encoded representation of the `Extrinsic`. - const VERSIONS: &'static [u8]; + /// By format is meant the encoded representation of the `Extrinsic`. + const VERSION: u8; /// Transaction extensions attached to this `Extrinsic`. type TransactionExtensions; @@ -2350,8 +2348,7 @@ pub trait BlockNumberProvider { + TypeInfo + Debug + MaxEncodedLen - + Copy - + EncodeLike; + + Copy; /// Returns the current block number. 
/// diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs index 282064078fe3..a5179748673f 100644 --- a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs +++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs @@ -25,7 +25,7 @@ use sp_core::RuntimeDebug; use crate::{ traits::{AsSystemOriginSigner, SignedExtension, ValidateResult}, - transaction_validity::{InvalidTransaction, TransactionSource}, + transaction_validity::InvalidTransaction, }; use super::*; @@ -74,7 +74,6 @@ where len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> ValidateResult { let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; let r = self.0.validate(who, call, info, len)?; diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs index 1fbaab0d45ac..e2fb556bf9d3 100644 --- a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs +++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs @@ -17,11 +17,7 @@ //! The [DispatchTransaction] trait. 
-use crate::{ - generic::ExtensionVersion, - traits::AsTransactionAuthorizedOrigin, - transaction_validity::{InvalidTransaction, TransactionSource}, -}; +use crate::{traits::AsTransactionAuthorizedOrigin, transaction_validity::InvalidTransaction}; use super::*; @@ -49,8 +45,6 @@ pub trait DispatchTransaction { call: &Call, info: &Self::Info, len: usize, - source: TransactionSource, - extension_version: ExtensionVersion, ) -> Result<(ValidTransaction, Self::Val, Self::Origin), TransactionValidityError>; /// Validate and prepare a transaction, ready for dispatch. fn validate_and_prepare( @@ -59,7 +53,6 @@ pub trait DispatchTransaction { call: &Call, info: &Self::Info, len: usize, - extension_version: ExtensionVersion, ) -> Result<(Self::Pre, Self::Origin), TransactionValidityError>; /// Dispatch a transaction with the given base origin and call. fn dispatch_transaction( @@ -68,7 +61,6 @@ pub trait DispatchTransaction { call: Call, info: &Self::Info, len: usize, - extension_version: ExtensionVersion, ) -> Self::Result; /// Do everything which would be done in a [dispatch_transaction](Self::dispatch_transaction), /// but instead of executing the call, execute `substitute` instead. Since this doesn't actually @@ -79,7 +71,6 @@ pub trait DispatchTransaction { call: &Call, info: &Self::Info, len: usize, - extension_version: ExtensionVersion, substitute: impl FnOnce( Self::Origin, ) -> crate::DispatchResultWithInfo<::PostInfo>, @@ -102,18 +93,8 @@ where call: &Call, info: &DispatchInfoOf, len: usize, - source: TransactionSource, - extension_version: ExtensionVersion, ) -> Result<(ValidTransaction, T::Val, Self::Origin), TransactionValidityError> { - match self.validate( - origin, - call, - info, - len, - self.implicit()?, - &TxBaseImplication((extension_version, call)), - source, - ) { + match self.validate(origin, call, info, len, self.implicit()?, call) { // After validation, some origin must have been authorized. 
Ok((_, _, origin)) if !origin.is_transaction_authorized() => Err(InvalidTransaction::UnknownOrigin.into()), @@ -126,16 +107,8 @@ where call: &Call, info: &DispatchInfoOf, len: usize, - extension_version: ExtensionVersion, ) -> Result<(T::Pre, Self::Origin), TransactionValidityError> { - let (_, val, origin) = self.validate_only( - origin, - call, - info, - len, - TransactionSource::InBlock, - extension_version, - )?; + let (_, val, origin) = self.validate_only(origin, call, info, len)?; let pre = self.prepare(val, &origin, &call, info, len)?; Ok((pre, origin)) } @@ -145,10 +118,8 @@ where call: Call, info: &DispatchInfoOf, len: usize, - extension_version: ExtensionVersion, ) -> Self::Result { - let (pre, origin) = - self.validate_and_prepare(origin, &call, info, len, extension_version)?; + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; let mut res = call.dispatch(origin); let pd_res = res.map(|_| ()).map_err(|e| e.error); let post_info = match &mut res { @@ -165,13 +136,11 @@ where call: &Call, info: &Self::Info, len: usize, - extension_version: ExtensionVersion, substitute: impl FnOnce( Self::Origin, ) -> crate::DispatchResultWithInfo<::PostInfo>, ) -> Self::Result { - let (pre, origin) = - self.validate_and_prepare(origin, &call, info, len, extension_version)?; + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; let mut res = substitute(origin); let pd_res = res.map(|_| ()).map_err(|e| e.error); let post_info = match &mut res { diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs index 27f33acb69cc..58cd0974661a 100644 --- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs +++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs @@ -19,9 +19,7 @@ use crate::{ scale_info::{MetaType, StaticTypeInfo}, - transaction_validity::{ - TransactionSource, TransactionValidity, 
TransactionValidityError, ValidTransaction, - }, + transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, DispatchResult, }; use codec::{Codec, Decode, Encode}; @@ -43,72 +41,6 @@ mod dispatch_transaction; pub use as_transaction_extension::AsTransactionExtension; pub use dispatch_transaction::DispatchTransaction; -/// Provides `Sealed` trait. -mod private { - /// Special trait that prevents the implementation of some traits outside of this crate. - pub trait Sealed {} -} - -/// The base implication in a transaction. -/// -/// This struct is used to represent the base implication in the transaction, that is -/// the implication not part of any transaction extensions. It usually comprises of the call and -/// the transaction extension version. -/// -/// The concept of implication in the transaction extension pipeline is explained in the trait -/// documentation: [`TransactionExtension`]. -#[derive(Encode)] -pub struct TxBaseImplication(pub T); - -impl Implication for TxBaseImplication { - fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> { - ImplicationParts { base: self, explicit: &(), implicit: &() } - } -} - -impl private::Sealed for TxBaseImplication {} - -/// The implication in a transaction. -/// -/// The concept of implication in the transaction extension pipeline is explained in the trait -/// documentation: [`TransactionExtension`]. -#[derive(Encode)] -pub struct ImplicationParts { - /// The base implication, that is implication not part of any transaction extension, usually - /// the call and the transaction extension version. - pub base: Base, - /// The explicit implication in transaction extensions. - pub explicit: Explicit, - /// The implicit implication in transaction extensions. 
- pub implicit: Implicit, -} - -impl Implication - for ImplicationParts -{ - fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> { - ImplicationParts { base: &self.base, explicit: &self.explicit, implicit: &self.implicit } - } -} - -impl private::Sealed for ImplicationParts {} - -/// Interface of implications in the transaction extension pipeline. -/// -/// Implications can be encoded, this is useful for checking signature on the implications. -/// Implications can be split into parts, this allow to destructure and restructure the -/// implications, this is useful for nested pipeline. -/// -/// This trait is sealed, consider using [`TxBaseImplication`] and [`ImplicationParts`] -/// implementations. -/// -/// The concept of implication in the transaction extension pipeline is explained in the trait -/// documentation: [`TransactionExtension`]. -pub trait Implication: Encode + private::Sealed { - /// Destructure the implication into its parts. - fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode>; -} - /// Shortcut for the result value of the `validate` function. pub type ValidateResult = Result<(ValidTransaction, Val, DispatchOriginOf), TransactionValidityError>; @@ -310,8 +242,7 @@ pub trait TransactionExtension: info: &DispatchInfoOf, len: usize, self_implicit: Self::Implicit, - inherited_implication: &impl Implication, - source: TransactionSource, + inherited_implication: &impl Encode, ) -> ValidateResult; /// Do any pre-flight stuff for a transaction after validation. @@ -498,7 +429,6 @@ macro_rules! 
impl_tx_ext_default { _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl $crate::codec::Encode, - _source: $crate::transaction_validity::TransactionSource, ) -> $crate::traits::ValidateResult { Ok((Default::default(), Default::default(), origin)) } @@ -565,8 +495,7 @@ impl TransactionExtension for Tuple { info: &DispatchInfoOf, len: usize, self_implicit: Self::Implicit, - inherited_implication: &impl Implication, - source: TransactionSource, + inherited_implication: &impl Encode, ) -> Result< (ValidTransaction, Self::Val, ::RuntimeOrigin), TransactionValidityError, @@ -576,20 +505,23 @@ impl TransactionExtension for Tuple { let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) ); let following_implicit_implications = self_implicit; - let implication_parts = inherited_implication.parts(); - for_tuples!(#( // Implication of this pipeline element not relevant for later items, so we pop it. let (_item, following_explicit_implications) = following_explicit_implications.pop_front(); let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front(); let (item_valid, item_val, origin) = { - Tuple.validate(origin, call, info, len, item_implicit, - &ImplicationParts { - base: implication_parts.base, - explicit: (&following_explicit_implications, implication_parts.explicit), - implicit: (&following_implicit_implications, implication_parts.implicit), - }, - source)? + let implications = ( + // The first is the implications born of the fact we return the mutated + // origin. + inherited_implication, + // This is the explicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_explicit_implications, + // This is the implicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. 
+ &following_implicit_implications, + ); + Tuple.validate(origin, call, info, len, item_implicit, &implications)? }; let valid = valid.combine_with(item_valid); let val = val.push_back(item_val); @@ -683,8 +615,7 @@ impl TransactionExtension for () { _info: &DispatchInfoOf, _len: usize, _self_implicit: Self::Implicit, - _inherited_implication: &impl Implication, - _source: TransactionSource, + _inherited_implication: &impl Encode, ) -> Result< (ValidTransaction, (), ::RuntimeOrigin), TransactionValidityError, @@ -702,168 +633,3 @@ impl TransactionExtension for () { Ok(()) } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_implications_on_nested_structure() { - use scale_info::TypeInfo; - use std::cell::RefCell; - - #[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)] - struct MockExtension { - also_implicit: u8, - explicit: u8, - } - - const CALL_IMPLICIT: u8 = 23; - - thread_local! { - static COUNTER: RefCell = RefCell::new(1); - } - - impl TransactionExtension<()> for MockExtension { - const IDENTIFIER: &'static str = "MockExtension"; - type Implicit = u8; - fn implicit(&self) -> Result { - Ok(self.also_implicit) - } - type Val = (); - type Pre = (); - fn weight(&self, _call: &()) -> Weight { - Weight::zero() - } - fn prepare( - self, - _val: Self::Val, - _origin: &DispatchOriginOf<()>, - _call: &(), - _info: &DispatchInfoOf<()>, - _len: usize, - ) -> Result { - Ok(()) - } - fn validate( - &self, - origin: DispatchOriginOf<()>, - _call: &(), - _info: &DispatchInfoOf<()>, - _len: usize, - self_implicit: Self::Implicit, - inherited_implication: &impl Implication, - _source: TransactionSource, - ) -> ValidateResult { - COUNTER.with(|c| { - let mut counter = c.borrow_mut(); - - assert_eq!(self_implicit, *counter); - assert_eq!( - self, - &MockExtension { also_implicit: *counter, explicit: *counter + 1 } - ); - - // Implications must be call then 1 to 22 then 1 to 22 odd. 
- let mut assert_implications = Vec::new(); - assert_implications.push(CALL_IMPLICIT); - for i in *counter + 2..23 { - assert_implications.push(i); - } - for i in *counter + 2..23 { - if i % 2 == 1 { - assert_implications.push(i); - } - } - assert_eq!(inherited_implication.encode(), assert_implications); - - *counter += 2; - }); - Ok((ValidTransaction::default(), (), origin)) - } - fn post_dispatch_details( - _pre: Self::Pre, - _info: &DispatchInfoOf<()>, - _post_info: &PostDispatchInfoOf<()>, - _len: usize, - _result: &DispatchResult, - ) -> Result { - Ok(Weight::zero()) - } - } - - // Test for one nested structure - - let ext = ( - MockExtension { also_implicit: 1, explicit: 2 }, - MockExtension { also_implicit: 3, explicit: 4 }, - ( - MockExtension { also_implicit: 5, explicit: 6 }, - MockExtension { also_implicit: 7, explicit: 8 }, - ( - MockExtension { also_implicit: 9, explicit: 10 }, - MockExtension { also_implicit: 11, explicit: 12 }, - ), - MockExtension { also_implicit: 13, explicit: 14 }, - MockExtension { also_implicit: 15, explicit: 16 }, - ), - MockExtension { also_implicit: 17, explicit: 18 }, - (MockExtension { also_implicit: 19, explicit: 20 },), - MockExtension { also_implicit: 21, explicit: 22 }, - ); - - let implicit = ext.implicit().unwrap(); - - let res = ext - .validate( - (), - &(), - &DispatchInfoOf::<()>::default(), - 0, - implicit, - &TxBaseImplication(CALL_IMPLICIT), - TransactionSource::Local, - ) - .expect("valid"); - - assert_eq!(res.0, ValidTransaction::default()); - - // Test for another nested structure - - COUNTER.with(|c| { - *c.borrow_mut() = 1; - }); - - let ext = ( - MockExtension { also_implicit: 1, explicit: 2 }, - MockExtension { also_implicit: 3, explicit: 4 }, - MockExtension { also_implicit: 5, explicit: 6 }, - MockExtension { also_implicit: 7, explicit: 8 }, - MockExtension { also_implicit: 9, explicit: 10 }, - MockExtension { also_implicit: 11, explicit: 12 }, - ( - MockExtension { also_implicit: 13, explicit: 14 }, - 
MockExtension { also_implicit: 15, explicit: 16 }, - MockExtension { also_implicit: 17, explicit: 18 }, - MockExtension { also_implicit: 19, explicit: 20 }, - MockExtension { also_implicit: 21, explicit: 22 }, - ), - ); - - let implicit = ext.implicit().unwrap(); - - let res = ext - .validate( - (), - &(), - &DispatchInfoOf::<()>::default(), - 0, - implicit, - &TxBaseImplication(CALL_IMPLICIT), - TransactionSource::Local, - ) - .expect("valid"); - - assert_eq!(res.0, ValidTransaction::default()); - } -} diff --git a/substrate/primitives/runtime/src/type_with_default.rs b/substrate/primitives/runtime/src/type_with_default.rs index b0eca22e5c1a..1465393640dc 100644 --- a/substrate/primitives/runtime/src/type_with_default.rs +++ b/substrate/primitives/runtime/src/type_with_default.rs @@ -31,7 +31,7 @@ use num_traits::{ CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, CheckedShr, CheckedSub, Num, NumCast, PrimInt, Saturating, ToPrimitive, }; -use scale_info::{StaticTypeInfo, TypeInfo}; +use scale_info::TypeInfo; use sp_core::Get; #[cfg(feature = "serde")] @@ -40,8 +40,7 @@ use serde::{Deserialize, Serialize}; /// A type that wraps another type and provides a default value. /// /// Passes through arithmetical and many other operations to the inner value. -/// Type information for metadata is the same as the inner value's type. -#[derive(Encode, Decode, Debug, MaxEncodedLen)] +#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TypeWithDefault>(T, PhantomData); @@ -51,17 +50,6 @@ impl> TypeWithDefault { } } -// Hides implementation details from the outside (for metadata type information). -// -// The type info showed in metadata is the one of the inner value's type. 
-impl + 'static> TypeInfo for TypeWithDefault { - type Identity = Self; - - fn type_info() -> scale_info::Type { - T::type_info() - } -} - impl> Clone for TypeWithDefault { fn clone(&self) -> Self { Self(self.0.clone(), PhantomData) @@ -103,6 +91,24 @@ impl> Default for TypeWithDefault { } } +impl, D: Get> From for TypeWithDefault { + fn from(value: u16) -> Self { + Self::new(value.into()) + } +} + +impl, D: Get> From for TypeWithDefault { + fn from(value: u32) -> Self { + Self::new(value.into()) + } +} + +impl, D: Get> From for TypeWithDefault { + fn from(value: u64) -> Self { + Self::new(value.into()) + } +} + impl> CheckedNeg for TypeWithDefault { fn checked_neg(&self) -> Option { self.0.checked_neg().map(Self::new) @@ -199,45 +205,24 @@ impl> AddAssign for TypeWithDefault { } } +impl, D: Get> From for TypeWithDefault { + fn from(value: u8) -> Self { + Self::new(value.into()) + } +} + impl> Display for TypeWithDefault { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", self.0) } } -macro_rules! impl_from { - ($for_type:ty $(, $from_type:ty)*) => { - $( - impl> From<$from_type> for TypeWithDefault<$for_type, D> { - fn from(value: $from_type) -> Self { - Self::new(value.into()) - } - } - )* - } -} -impl_from!(u128, u128, u64, u32, u16, u8); -impl_from!(u64, u64, u32, u16, u8); -impl_from!(u32, u32, u16, u8); -impl_from!(u16, u16, u8); -impl_from!(u8, u8); - -macro_rules! 
impl_try_from { - ($for_type:ty $(, $try_from_type:ty)*) => { - $( - impl> TryFrom<$try_from_type> for TypeWithDefault<$for_type, D> { - type Error = <$for_type as TryFrom<$try_from_type>>::Error; - fn try_from(n: $try_from_type) -> Result, Self::Error> { - <$for_type as TryFrom<$try_from_type>>::try_from(n).map(Self::new) - } - } - )* - } -} -impl_try_from!(u8, u16, u32, u64, u128); -impl_try_from!(u16, u32, u64, u128); -impl_try_from!(u32, u64, u128); -impl_try_from!(u64, u128); +impl, D: Get> TryFrom for TypeWithDefault { + type Error = >::Error; + fn try_from(n: u128) -> Result, Self::Error> { + T::try_from(n).map(Self::new) + } +} impl, D: Get> TryFrom for TypeWithDefault { type Error = >::Error; @@ -519,70 +504,3 @@ impl> CompactAs for TypeWithDefault { Ok(Self::new(val)) } } - -#[cfg(test)] -mod tests { - use super::TypeWithDefault; - use scale_info::TypeInfo; - use sp_arithmetic::traits::{AtLeast16Bit, AtLeast32Bit, AtLeast8Bit}; - use sp_core::Get; - - #[test] - #[allow(dead_code)] - fn test_type_with_default_impl_base_arithmetic() { - trait WrapAtLeast8Bit: AtLeast8Bit {} - trait WrapAtLeast16Bit: AtLeast16Bit {} - trait WrapAtLeast32Bit: AtLeast32Bit {} - - struct Getu8; - impl Get for Getu8 { - fn get() -> u8 { - 0 - } - } - type U8WithDefault = TypeWithDefault; - impl WrapAtLeast8Bit for U8WithDefault {} - - struct Getu16; - impl Get for Getu16 { - fn get() -> u16 { - 0 - } - } - type U16WithDefault = TypeWithDefault; - impl WrapAtLeast16Bit for U16WithDefault {} - - struct Getu32; - impl Get for Getu32 { - fn get() -> u32 { - 0 - } - } - type U32WithDefault = TypeWithDefault; - impl WrapAtLeast32Bit for U32WithDefault {} - - struct Getu64; - impl Get for Getu64 { - fn get() -> u64 { - 0 - } - } - type U64WithDefault = TypeWithDefault; - impl WrapAtLeast32Bit for U64WithDefault {} - - struct Getu128; - impl Get for Getu128 { - fn get() -> u128 { - 0 - } - } - type U128WithDefault = TypeWithDefault; - impl WrapAtLeast32Bit for U128WithDefault {} - - 
assert_eq!(U8WithDefault::type_info(), ::type_info()); - assert_eq!(U16WithDefault::type_info(), ::type_info()); - assert_eq!(U32WithDefault::type_info(), ::type_info()); - assert_eq!(U64WithDefault::type_info(), ::type_info()); - assert_eq!(U128WithDefault::type_info(), ::type_info()); - } -} diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 72be81c1222e..6abf83505530 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -20,9 +20,9 @@ codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } -sp-keystore = { optional = true, workspace = true } sp-runtime = { optional = true, workspace = true } sp-staking = { workspace = true } +sp-keystore = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 42694cdbb674..35e7e4f60413 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -impl-trait-for-tuples = { workspace = true } scale-info = { features = ["derive"], workspace = true } -serde = { features = ["alloc", "derive"], optional = true, workspace = true } +impl-trait-for-tuples = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index e73e8efe5839..2c2ebc1fc971 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -242,28 +242,3 @@ impl OffenceReportSystem for () { Ok(()) } } - -/// Wrapper 
type representing the severity of an offence. -/// -/// As of now the only meaningful value taken into account -/// when deciding the severity of an offence is the associated -/// slash amount `Perbill`. -/// -/// For instance used for the purposes of distinguishing who should be -/// prioritized for disablement. -#[derive( - Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo, -)] -pub struct OffenceSeverity(pub Perbill); - -impl PartialOrd for OffenceSeverity { - fn partial_cmp(&self, other: &Self) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl Ord for OffenceSeverity { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.0.cmp(&other.0) - } -} diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 5bc06b8cb509..e1c67feb7ac5 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -17,28 +17,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -arbitrary = { features = ["derive"], optional = true, workspace = true } codec = { workspace = true } hash-db = { workspace = true } log = { workspace = true } parking_lot = { optional = true, workspace = true, default-features = true } rand = { optional = true, workspace = true, default-features = true } smallvec = { workspace = true, default-features = true } +thiserror = { optional = true, workspace = true } +tracing = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-externalities = { workspace = true } sp-panic-handler = { optional = true, workspace = true, default-features = true } sp-trie = { workspace = true } -thiserror = { optional = true, workspace = true } -tracing = { optional = true, workspace = true, default-features = true } trie-db = { workspace = true } +arbitrary = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] -arbitrary = { 
features = ["derive"], workspace = true } array-bytes = { workspace = true, default-features = true } -assert_matches = { workspace = true } pretty_assertions = { workspace = true } rand = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +assert_matches = { workspace = true } +arbitrary = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/state-machine/fuzz/Cargo.toml b/substrate/primitives/state-machine/fuzz/Cargo.toml index 16bf5b92025f..416c00c34fda 100644 --- a/substrate/primitives/state-machine/fuzz/Cargo.toml +++ b/substrate/primitives/state-machine/fuzz/Cargo.toml @@ -13,8 +13,8 @@ libfuzzer-sys = "0.4" sp-runtime = { path = "../../runtime" } [dependencies.sp-state-machine] -features = ["fuzzing"] path = ".." +features = ["fuzzing"] # Prevent this from interfering with workspaces [workspace] diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 8d4dfd34240d..f91ce5d2e52f 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -73,10 +73,7 @@ pub trait TrieCacheProvider { #[cfg(feature = "std")] impl TrieCacheProvider for LocalTrieCache { - type Cache<'a> - = TrieCache<'a, H> - where - H: 'a; + type Cache<'a> = TrieCache<'a, H> where H: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { self.as_trie_db_cache(storage_root) @@ -93,10 +90,7 @@ impl TrieCacheProvider for LocalTrieCache { #[cfg(feature = "std")] impl TrieCacheProvider for &LocalTrieCache { - type Cache<'a> - = TrieCache<'a, H> - where - Self: 'a; + type Cache<'a> = TrieCache<'a, H> where Self: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { (*self).as_trie_db_cache(storage_root) @@ -145,10 +139,7 @@ impl trie_db::TrieCache> for UnimplementedCacheProvider< #[cfg(not(feature = "std"))] impl 
TrieCacheProvider for UnimplementedCacheProvider { - type Cache<'a> - = UnimplementedCacheProvider - where - H: 'a; + type Cache<'a> = UnimplementedCacheProvider where H: 'a; fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { unimplemented!() @@ -185,10 +176,7 @@ impl trie_db::TrieRecorder for UnimplementedRecorderProvider< #[cfg(not(feature = "std"))] impl TrieRecorderProvider for UnimplementedRecorderProvider { - type Recorder<'a> - = UnimplementedRecorderProvider - where - H: 'a; + type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; fn drain_storage_proof(self) -> Option { unimplemented!() diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index df66cfcfc2e6..aac676caedc9 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -18,23 +18,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-api = { workspace = true } -sp-application-crypto = { workspace = true } sp-core = { workspace = true } sp-crypto-hashing = { workspace = true } -sp-externalities = { workspace = true } sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } # ECIES dependencies -aes-gcm = { optional = true, workspace = true } -curve25519-dalek = { optional = true, workspace = true } ed25519-dalek = { optional = true, workspace = true, default-features = true } +x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } +curve25519-dalek = { optional = true, workspace = true } +aes-gcm = { optional = true, workspace = true } hkdf = { optional = true, workspace = true } -rand = { features = ["small_rng"], optional = 
true, workspace = true, default-features = true } sha2 = { optional = true, workspace = true, default-features = true } -x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } +rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 619f1eaa142b..0fcd5be98e6f 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { optional = true, workspace = true } codec = { features = ["derive"], workspace = true } +thiserror = { optional = true, workspace = true } sp-inherents = { workspace = true } sp-runtime = { workspace = true } -thiserror = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 65a9727ed2ae..7f27bb097290 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -29,20 +29,20 @@ nohash-hasher = { optional = true, workspace = true } parking_lot = { optional = true, workspace = true, default-features = true } rand = { optional = true, workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } -schnellru = { optional = true, workspace = true } -sp-core = { workspace = true } -sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } tracing = { optional = true, workspace = true, default-features = true } trie-db = { workspace = true } trie-root = { workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +schnellru = { optional = true, workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } criterion = { workspace = true, default-features 
= true } -sp-runtime = { workspace = true, default-features = true } trie-bench = { workspace = true } trie-standardmap = { workspace = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/src/node_codec.rs b/substrate/primitives/trie/src/node_codec.rs index 400f57f3b1bf..78896988ec4c 100644 --- a/substrate/primitives/trie/src/node_codec.rs +++ b/substrate/primitives/trie/src/node_codec.rs @@ -110,10 +110,6 @@ where NodeHeader::Null => Ok(NodePlan::Empty), NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; - // data should be at least of size offset + 1 - if data.len() < input.offset + 1 { - return Err(Error::BadFormat) - } // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { return Err(Error::BadFormat) @@ -158,10 +154,6 @@ where }, NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; - // data should be at least of size offset + 1 - if data.len() < input.offset + 1 { - return Err(Error::BadFormat) - } // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { return Err(Error::BadFormat) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 4ec13066ded7..2886577eddc6 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -252,10 +252,7 @@ pub struct TrieRecorder<'a, H: Hasher> { } impl crate::TrieRecorderProvider for Recorder { - type Recorder<'a> - = TrieRecorder<'a, H> - where - H: 'a; + type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; fn drain_storage_proof(self) -> Option { Some(Recorder::drain_storage_proof(self)) diff --git a/substrate/primitives/trie/src/storage_proof.rs 
b/substrate/primitives/trie/src/storage_proof.rs index bf0dc72e650b..a9f6298742f6 100644 --- a/substrate/primitives/trie/src/storage_proof.rs +++ b/substrate/primitives/trie/src/storage_proof.rs @@ -232,8 +232,7 @@ pub mod tests { use super::*; use crate::{tests::create_storage_proof, StorageProof}; - type Hasher = sp_core::Blake2Hasher; - type Layout = crate::LayoutV1; + type Layout = crate::LayoutV1; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key11", &[4; 64])]; @@ -246,11 +245,4 @@ pub mod tests { Err(StorageProofError::DuplicateNodes) )); } - - #[test] - fn invalid_compact_proof_does_not_panic_when_decoding() { - let invalid_proof = CompactProof { encoded_nodes: vec![vec![135]] }; - let result = invalid_proof.to_memory_db::(None); - assert!(result.is_err()); - } } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 7fa983d02823..0424304989b7 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -22,11 +22,11 @@ impl-serde = { optional = true, workspace = true } parity-wasm = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } +thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-std = { workspace = true } sp-version-proc-macro = { workspace = true } -thiserror = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 9f8eea5102d6..9d0310fd22e8 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -anyhow = 
{ optional = true, workspace = true } codec = { features = ["derive"], workspace = true } impl-trait-for-tuples = { workspace = true } log = { optional = true, workspace = true, default-features = true } wasmtime = { optional = true, workspace = true } +anyhow = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 9cd0d9ac2e20..c4e1897dbb8e 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] bounded-collections = { workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -schemars = { optional = true, workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true } sp-debug-derive = { workspace = true } +schemars = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 5b90044d44dd..d335dbcf3971 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -18,7 +18,7 @@ clap = { features = ["derive"], workspace = true } flate2 = { workspace = true } fs_extra = { workspace = true } glob = { workspace = true } -itertools = { workspace = true } tar = { workspace = true } tempfile = { workspace = true } toml_edit = { workspace = true } +itertools = { workspace = true } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 87c9cb731e3a..4f7a70906859 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -20,5 +20,5 @@ futures = { workspace = true } tokio = { features = ["macros", "time"], 
workspace = true, default-features = true } [dev-dependencies] -sc-service = { workspace = true, default-features = true } trybuild = { features = ["diff"], workspace = true } +sc-service = { workspace = true, default-features = true } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index b11e67bc49bc..3fbcf2090683 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +substrate-rpc-client = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } assert_cmd = { workspace = true } -futures = { workspace = true } nix = { features = ["signal"], workspace = true } -node-cli = { workspace = true } -node-primitives = { workspace = true, default-features = true } regex = { workspace = true } +tokio = { features = ["full"], workspace = true, default-features = true } +node-primitives = { workspace = true, default-features = true } +node-cli = { workspace = true } sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sp-rpc = { workspace = true, default-features = true } -substrate-rpc-client = { workspace = true, default-features = true } -tokio = { features = ["full"], workspace = true, default-features = true } +futures = { workspace = true } [features] try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index e7ab4c8c8367..ebd1eab5980d 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -20,6 +20,8 @@ array-bytes = { workspace = true, default-features = true } async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, 
default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = [ "test-helpers", @@ -27,9 +29,9 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { workspace = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +sc-service = { features = [ + "test-helpers", +], workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 5a4e6c911694..c07640653d56 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -27,7 +27,9 @@ pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; -pub use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; +pub use sp_keyring::{ + ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, +}; pub use sp_keystore::{Keystore, KeystorePtr}; pub use sp_runtime::{Storage, StorageChild}; diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 7af692b437f6..1c82c73072bc 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -16,43 +16,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive"], workspace = true } -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } 
-frame-system-rpc-runtime-api = { workspace = true } -pallet-babe = { workspace = true } -pallet-balances = { workspace = true } -pallet-timestamp = { workspace = true } -sc-service = { optional = true, workspace = true } -scale-info = { features = ["derive"], workspace = true } -sp-api = { workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } -sp-block-builder = { workspace = true } sp-consensus-aura = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } -sp-consensus-grandpa = { features = ["serde"], workspace = true } -sp-core = { features = ["serde"], workspace = true } -sp-crypto-hashing = { workspace = true } -sp-externalities = { workspace = true } sp-genesis-builder = { workspace = true } +sp-block-builder = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-inherents = { workspace = true } -sp-io = { workspace = true } sp-keyring = { workspace = true } sp-offchain = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-crypto-hashing = { workspace = true } +sp-io = { workspace = true } +frame-support = { workspace = true } +sp-version = { workspace = true } sp-session = { workspace = true } -sp-state-machine = { workspace = true } -sp-transaction-pool = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-trie = { workspace = true } -sp-version = { workspace = true } +sp-transaction-pool = { 
workspace = true } trie-db = { workspace = true } +sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sp-state-machine = { workspace = true } +sp-externalities = { workspace = true } # 3rd party array-bytes = { optional = true, workspace = true, default-features = true } -log = { workspace = true } serde_json = { workspace = true, features = ["alloc"] } +log = { workspace = true } tracing = { workspace = true, default-features = false } [dev-dependencies] @@ -61,11 +61,11 @@ sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } -serde = { features = ["alloc", "derive"], workspace = true } -serde_json = { features = ["alloc"], workspace = true } sp-consensus = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +serde = { features = ["alloc", "derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, features = ["metadata-hash"], workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index a5a37660660c..435f3f5ebacb 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -45,7 +45,7 @@ pub mod prelude { Backend, ExecutorDispatch, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring - pub use super::Sr25519Keyring; + pub use super::{AccountKeyring, Sr25519Keyring}; } /// Test client database backend. 
diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 815e05163281..c3a5f173d14e 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use crate::{ - BlockBuilderExt, ClientBlockImportExt, Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; use futures::executor::block_on; use sc_block_builder::BlockBuilderBuilder; @@ -132,8 +132,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -179,8 +179,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -199,8 +199,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -295,8 +295,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -338,8 +338,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and 
won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -357,8 +357,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -464,8 +464,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -507,8 +507,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -526,8 +526,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Ferdie.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, }) diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 491086bef497..8f94dd10a834 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -25,7 +25,7 @@ use codec::Encode; use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; -use sp_keyring::Sr25519Keyring; +use 
sp_keyring::AccountKeyring; use sp_runtime::{ generic::Preamble, traits::TransactionExtension, transaction_validity::TransactionPriority, Perbill, @@ -54,8 +54,8 @@ impl Transfer { impl Default for TransferData { fn default() -> Self { Self { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 0, nonce: 0, } @@ -69,7 +69,7 @@ impl TryFrom<&Extrinsic> for TransferData { match uxt { Extrinsic { function: RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }), - preamble: Preamble::Signed(from, _, ((CheckNonce(nonce), ..), ..)), + preamble: Preamble::Signed(from, _, _, ((CheckNonce(nonce), ..), ..)), } => Ok(TransferData { from: *from, to: *dest, amount: *value, nonce: *nonce }), Extrinsic { function: RuntimeCall::SubstrateTest(PalletCall::bench_call { transfer }), @@ -93,7 +93,7 @@ impl ExtrinsicBuilder { pub fn new(function: impl Into) -> Self { Self { function: function.into(), - signer: Some(Sr25519Keyring::Alice.pair()), + signer: Some(AccountKeyring::Alice.pair()), nonce: None, metadata_hash: None, } diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 5c0c146d45a5..9e972886b377 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -27,7 +27,7 @@ use sp_core::{ storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::Sr25519Keyring; +use sp_keyring::{AccountKeyring, Sr25519Keyring}; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, Header as HeaderT}, BuildStorage, @@ -60,11 +60,11 @@ impl Default for GenesisStorageBuilder { ], (0..16_usize) .into_iter() - .map(|i| Sr25519Keyring::numeric(i).public()) + .map(|i| AccountKeyring::numeric(i).public()) .chain(vec![ - Sr25519Keyring::Alice.into(), - Sr25519Keyring::Bob.into(), - Sr25519Keyring::Charlie.into(), + AccountKeyring::Alice.into(), + 
AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), ]) .collect(), 1000 * currency::DOLLARS, diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 666776865316..1314d9d6dd45 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -47,7 +47,7 @@ use frame_system::{ }; use scale_info::TypeInfo; use sp_application_crypto::Ss58Codec; -use sp_keyring::Sr25519Keyring; +use sp_keyring::AccountKeyring; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{OpaqueMetadata, RuntimeDebug}; @@ -292,7 +292,6 @@ impl sp_runtime::traits::TransactionExtension for CheckSubstrateCal _len: usize, _self_implicit: Self::Implicit, _inherited_implication: &impl Encode, - _source: TransactionSource, ) -> Result< (ValidTransaction, Self::Val, ::RuntimeOrigin), TransactionValidityError, @@ -731,8 +730,8 @@ impl_runtime_apis! { let patch = match name.as_ref() { "staging" => { let endowed_accounts: Vec = vec![ - Sr25519Keyring::Bob.public().into(), - Sr25519Keyring::Charlie.public().into(), + AccountKeyring::Bob.public().into(), + AccountKeyring::Charlie.public().into(), ]; json!({ @@ -741,8 +740,8 @@ impl_runtime_apis! 
{ }, "substrateTest": { "authorities": [ - Sr25519Keyring::Alice.public().to_ss58check(), - Sr25519Keyring::Ferdie.public().to_ss58check() + AccountKeyring::Alice.public().to_ss58check(), + AccountKeyring::Ferdie.public().to_ss58check() ], } }) @@ -911,11 +910,11 @@ pub mod storage_key_generator { let balances_map_keys = (0..16_usize) .into_iter() - .map(|i| Sr25519Keyring::numeric(i).public().to_vec()) + .map(|i| AccountKeyring::numeric(i).public().to_vec()) .chain(vec![ - Sr25519Keyring::Alice.public().to_vec(), - Sr25519Keyring::Bob.public().to_vec(), - Sr25519Keyring::Charlie.public().to_vec(), + AccountKeyring::Alice.public().to_vec(), + AccountKeyring::Bob.public().to_vec(), + AccountKeyring::Charlie.public().to_vec(), ]) .map(|pubkey| { sp_crypto_hashing::blake2_128(&pubkey) @@ -1054,7 +1053,7 @@ mod tests { use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_runtime::{ traits::{DispatchTransaction, Hash as _}, - transaction_validity::{InvalidTransaction, TransactionSource::External, ValidTransaction}, + transaction_validity::{InvalidTransaction, ValidTransaction}, }; use substrate_test_runtime_client::{ prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, @@ -1133,8 +1132,8 @@ mod tests { pub fn new_test_ext() -> sp_io::TestExternalities { genesismap::GenesisStorageBuilder::new( - vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], + vec![AccountKeyring::One.public().into(), AccountKeyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000 * currency::DOLLARS, ) .build() @@ -1202,7 +1201,7 @@ mod tests { fn check_substrate_check_signed_extension_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - let x = Sr25519Keyring::Alice.into(); + let x = AccountKeyring::Alice.into(); let info = DispatchInfo::default(); let len = 0_usize; assert_eq!( @@ -1212,8 
+1211,6 @@ mod tests { &ExtrinsicBuilder::new_call_with_priority(16).build().function, &info, len, - External, - 0, ) .unwrap() .0 @@ -1228,8 +1225,6 @@ mod tests { &ExtrinsicBuilder::new_call_do_not_propagate().build().function, &info, len, - External, - 0, ) .unwrap() .0 @@ -1472,8 +1467,8 @@ mod tests { }, "substrateTest": { "authorities": [ - Sr25519Keyring::Ferdie.public().to_ss58check(), - Sr25519Keyring::Alice.public().to_ss58check() + AccountKeyring::Ferdie.public().to_ss58check(), + AccountKeyring::Alice.public().to_ss58check() ], } }); @@ -1502,8 +1497,8 @@ mod tests { let authority_key_vec = Vec::::decode(&mut &value[..]).unwrap(); assert_eq!(authority_key_vec.len(), 2); - assert_eq!(authority_key_vec[0], Sr25519Keyring::Ferdie.public()); - assert_eq!(authority_key_vec[1], Sr25519Keyring::Alice.public()); + assert_eq!(authority_key_vec[0], AccountKeyring::Ferdie.public()); + assert_eq!(authority_key_vec[1], AccountKeyring::Alice.public()); //Babe|Authorities let value: Vec = get_from_storage( diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 501c9f99ebf1..3cdaea642263 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -19,9 +19,9 @@ codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true } parking_lot = { workspace = true, default-features = true } +thiserror = { workspace = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -thiserror = { workspace = true } diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs 
b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 93e5855eefc6..6a4f38f63e82 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -43,7 +43,7 @@ use substrate_test_runtime_client::{ AccountId, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData, }, - Sr25519Keyring::{self, *}, + AccountKeyring::{self, *}, }; /// Error type used by [`TestApi`]. @@ -338,7 +338,7 @@ trait TagFrom { impl TagFrom for AccountId { fn tag_from(&self) -> u8 { - let f = Sr25519Keyring::iter().enumerate().find(|k| AccountId::from(k.1) == *self); + let f = AccountKeyring::iter().enumerate().find(|k| AccountId::from(k.1) == *self); u8::try_from(f.unwrap().0).unwrap() } } @@ -534,7 +534,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// Generate transfer extrinsic with a given nonce. /// /// Part of the test api. -pub fn uxt(who: Sr25519Keyring, nonce: Nonce) -> Extrinsic { +pub fn uxt(who: AccountKeyring, nonce: Nonce) -> Extrinsic { let dummy = codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap(); let transfer = Transfer { from: who.into(), to: dummy, nonce, amount: 1 }; ExtrinsicBuilder::new_transfer(transfer).build() diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 86d64face80e..9577d94ef0bf 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -12,16 +12,16 @@ homepage.workspace = true workspace = true [dependencies] -array-bytes = { optional = true, workspace = true, default-features = true } codec = { workspace = true, features = ["derive"] } -hash-db = { workspace = true } +array-bytes = { optional = true, workspace = true, default-features = true } log = { optional = true, workspace = true } +hash-db = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } 
+sp-tracing = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } [features] debug = ["array-bytes", "log"] diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index c38a7e4f77d8..8a4a06b1b40a 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -16,27 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -Inflector = { workspace = true } array-bytes = { workspace = true, default-features = true } chrono = { workspace = true } clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } comfy-table = { workspace = true } -cumulus-client-parachain-inherent = { workspace = true, default-features = true } -cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -gethostname = { workspace = true } handlebars = { workspace = true } -hex = { workspace = true, default-features = true } +Inflector = { workspace = true } itertools = { workspace = true } linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } rand = { features = ["small_rng"], workspace = true, default-features = true } rand_pcg = { workspace = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } +thousands = { workspace = true } 
+frame-benchmarking = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true } sc-cli = { workspace = true } @@ -44,34 +42,35 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true } -sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true } sc-sysinfo = { workspace = true, default-features = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } sp-database = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } 
+sp-transaction-pool = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } subxt = { workspace = true, features = ["native"] } subxt-signer = { workspace = true, features = ["unstable-eth"] } -thiserror = { workspace = true } -thousands = { workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +gethostname = { workspace = true } +hex = { workspace = true, default-features = true } [dev-dependencies] cumulus-test-runtime = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/benchmarking-cli/src/lib.rs b/substrate/utils/frame/benchmarking-cli/src/lib.rs index e1c3c5fe3706..1e8642e54d70 100644 --- a/substrate/utils/frame/benchmarking-cli/src/lib.rs +++ b/substrate/utils/frame/benchmarking-cli/src/lib.rs @@ -30,6 +30,7 @@ pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, SUBSTRATE_REFERENCE_HARDWARE}; pub use overhead::{ remark_builder::{DynamicRemarkBuilder, SubstrateRemarkBuilder}, + runtime_utilities::fetch_latest_metadata_from_code_blob, OpaqueBlock, OverheadCmd, }; pub use pallet::PalletCmd; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs index 8df8ee5464f7..8102f14b4f4b 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs @@ -18,6 +18,7 @@ //! 
Contains the [`OverheadCmd`] as entry point for the CLI to execute //! the *overhead* benchmarks. +use super::runtime_utilities::*; use crate::{ extrinsic::{ bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams}, @@ -36,7 +37,7 @@ use crate::{ }, }; use clap::{error::ErrorKind, Args, CommandFactory, Parser}; -use codec::{Decode, Encode}; +use codec::Encode; use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; use fake_runtime_api::RuntimeApi as FakeRuntimeApi; use frame_support::Deserialize; @@ -49,7 +50,6 @@ use sc_cli::{CliConfiguration, Database, ImportParams, Result, SharedParams}; use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; use sc_client_db::{BlocksPruning, DatabaseSettings}; use sc_executor::WasmExecutor; -use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; use sc_service::{new_client, new_db_backend, BasePath, ClientConfig, TFullClient, TaskManager}; use serde::Serialize; use serde_json::{json, Value}; @@ -317,7 +317,7 @@ impl OverheadCmd { Some(self.params.genesis_builder_preset.clone()), ), self.params.para_id, - )); + )) }; Err("Neither a runtime nor a chain-spec were specified".to_string().into()) @@ -335,7 +335,7 @@ impl OverheadCmd { ErrorKind::MissingRequiredArgument, "Provide either a runtime via `--runtime` or a chain spec via `--chain`" .to_string(), - )); + )) } match self.params.genesis_builder { @@ -344,7 +344,7 @@ impl OverheadCmd { return Err(( ErrorKind::MissingRequiredArgument, "Provide a chain spec via `--chain`.".to_string(), - )); + )) }, _ => {}, }; @@ -400,12 +400,8 @@ impl OverheadCmd { .with_allow_missing_host_functions(true) .build(); - let opaque_metadata = - fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?) 
- .map_err(|_| { - <&str as Into>::into("Unable to fetch latest stable metadata") - })?; - let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?; + let metadata = + fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?)?; // At this point we know what kind of chain we are dealing with. let chain_type = identify_chain(&metadata, para_id); @@ -686,7 +682,6 @@ mod tests { OverheadCmd, }; use clap::Parser; - use codec::Decode; use sc_executor::WasmExecutor; #[test] @@ -695,9 +690,8 @@ mod tests { let code_bytes = westend_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of westend-runtime") .to_vec(); - let opaque_metadata = + let metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); - let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = identify_chain(&metadata, None); assert_eq!(chain_type, ChainType::Relaychain); assert_eq!(chain_type.requires_proof_recording(), false); @@ -709,9 +703,8 @@ mod tests { let code_bytes = cumulus_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of cumulus-test-runtime") .to_vec(); - let opaque_metadata = + let metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); - let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = identify_chain(&metadata, Some(100)); assert_eq!(chain_type, ChainType::Parachain(100)); assert!(chain_type.requires_proof_recording()); @@ -724,9 +717,8 @@ mod tests { let code_bytes = substrate_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of substrate-test-runtime") .to_vec(); - let opaque_metadata = + let metadata = super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); - let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); let chain_type = 
identify_chain(&metadata, None); assert_eq!(chain_type, ChainType::Unknown); assert_eq!(chain_type.requires_proof_recording(), false); diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs index de524d9ebc18..89c23d1fb6c1 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs @@ -20,5 +20,6 @@ pub mod template; mod fake_runtime_api; pub mod remark_builder; +pub mod runtime_utilities; pub use command::{OpaqueBlock, OverheadCmd}; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs index 3a2d8776d1e1..a1d5f282d9f8 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs @@ -54,15 +54,13 @@ impl> DynamicRemarkBuilder { log::debug!("Found metadata API version {}.", metadata_api_version); let opaque_metadata = if metadata_api_version > 1 { - let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else { + let Ok(mut supported_metadata_versions) = api.metadata_versions(genesis) else { return Err("Unable to fetch metadata versions".to_string().into()); }; let latest = supported_metadata_versions - .into_iter() - .filter(|v| *v != u32::MAX) - .max() - .ok_or("No stable metadata versions supported".to_string())?; + .pop() + .ok_or("No metadata version supported".to_string())?; api.metadata_at_version(genesis, latest) .map_err(|e| format!("Unable to fetch metadata: {:?}", e))? 
diff --git a/substrate/client/runtime-utilities/src/lib.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs similarity index 56% rename from substrate/client/runtime-utilities/src/lib.rs rename to substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs index 1ae3e2f1105a..c498da38afb0 100644 --- a/substrate/client/runtime-utilities/src/lib.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs @@ -1,29 +1,21 @@ // This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate client runtime utilities. -//! -//! Provides convenient APIs to ease calling functions contained by a FRAME -//! runtime WASM blob. -#![warn(missing_docs)] +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. use codec::{Decode, Encode}; -use error::{Error, Result}; use sc_executor::WasmExecutor; use sp_core::{ traits::{CallContext, CodeExecutor, FetchRuntimeCode, RuntimeCode}, @@ -33,35 +25,36 @@ use sp_state_machine::BasicExternalities; use sp_wasm_interface::HostFunctions; use std::borrow::Cow; -pub mod error; - /// Fetches the latest metadata from the given runtime blob. pub fn fetch_latest_metadata_from_code_blob( executor: &WasmExecutor, code_bytes: Cow<[u8]>, -) -> Result { +) -> sc_cli::Result { let runtime_caller = RuntimeCaller::new(executor, code_bytes); let version_result = runtime_caller.call("Metadata_metadata_versions", ()); - match version_result { + let opaque_metadata: OpaqueMetadata = match version_result { Ok(supported_versions) => { - let supported_versions = Vec::::decode(&mut supported_versions.as_slice())?; - let latest_stable = supported_versions - .into_iter() - .filter(|v| *v != u32::MAX) - .max() - .ok_or(Error::StableMetadataVersionNotFound)?; - - let encoded = runtime_caller.call("Metadata_metadata_at_version", latest_stable)?; - + let latest_version = Vec::::decode(&mut supported_versions.as_slice()) + .map_err(|e| format!("Unable to decode version list: {e}"))? + .pop() + .ok_or("No metadata versions supported".to_string())?; + + let encoded = runtime_caller + .call("Metadata_metadata_at_version", latest_version) + .map_err(|_| "Unable to fetch metadata from blob".to_string())?; Option::::decode(&mut encoded.as_slice())? - .ok_or(Error::OpaqueMetadataNotFound) + .ok_or_else(|| "Metadata not found".to_string())? }, Err(_) => { - let encoded = runtime_caller.call("Metadata_metadata", ())?; - Decode::decode(&mut encoded.as_slice()).map_err(Into::into) + let encoded = runtime_caller + .call("Metadata_metadata", ()) + .map_err(|_| "Unable to fetch metadata from blob".to_string())?; + Decode::decode(&mut encoded.as_slice())? 
}, - } + }; + + Ok(subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?) } struct BasicCodeFetcher<'a> { @@ -76,11 +69,11 @@ impl<'a> FetchRuntimeCode for BasicCodeFetcher<'a> { } impl<'a> BasicCodeFetcher<'a> { - fn new(code: Cow<'a, [u8]>) -> Self { + pub fn new(code: Cow<'a, [u8]>) -> Self { Self { hash: sp_crypto_hashing::blake2_256(&code).to_vec(), code } } - fn runtime_code(&'a self) -> RuntimeCode<'a> { + pub fn runtime_code(&'a self) -> RuntimeCode<'a> { RuntimeCode { code_fetcher: self as &'a dyn FetchRuntimeCode, heap_pages: None, @@ -90,20 +83,17 @@ impl<'a> BasicCodeFetcher<'a> { } /// Simple utility that is used to call into the runtime. -pub struct RuntimeCaller<'a, 'b, HF: HostFunctions> { +struct RuntimeCaller<'a, 'b, HF: HostFunctions> { executor: &'b WasmExecutor, code_fetcher: BasicCodeFetcher<'a>, } impl<'a, 'b, HF: HostFunctions> RuntimeCaller<'a, 'b, HF> { - /// Instantiate a new runtime caller. pub fn new(executor: &'b WasmExecutor, code_bytes: Cow<'a, [u8]>) -> Self { Self { executor, code_fetcher: BasicCodeFetcher::new(code_bytes) } } - /// Calls a runtime function represented by a `method` name and `parity-scale-codec` - /// encodable arguments that will be passed to it. 
- pub fn call(&self, method: &str, data: impl Encode) -> Result> { + fn call(&self, method: &str, data: impl Encode) -> sc_executor_common::error::Result> { let mut ext = BasicExternalities::default(); self.executor .call( @@ -114,33 +104,24 @@ impl<'a, 'b, HF: HostFunctions> RuntimeCaller<'a, 'b, HF> { CallContext::Offchain, ) .0 - .map_err(Into::into) } } #[cfg(test)] mod tests { + use crate::overhead::command::ParachainHostFunctions; use codec::Decode; use sc_executor::WasmExecutor; use sp_version::RuntimeVersion; - type ParachainHostFunctions = ( - cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, - sp_io::SubstrateHostFunctions, - ); - #[test] fn test_fetch_latest_metadata_from_blob_fetches_metadata() { let executor: WasmExecutor = WasmExecutor::builder().build(); let code_bytes = cumulus_test_runtime::WASM_BINARY .expect("To run this test, build the wasm binary of cumulus-test-runtime") .to_vec(); - let metadata = subxt::Metadata::decode( - &mut (*super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()) - .unwrap()) - .as_slice(), - ) - .unwrap(); + let metadata = + super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); assert!(metadata.pallet_by_name("ParachainInfo").is_some()); } diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index 0c068fc585ba..6f7e79f16384 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -96,7 +96,6 @@ pub(crate) type PovModesMap = #[derive(Debug, Clone)] struct SelectedBenchmark { pallet: String, - instance: String, extrinsic: String, components: Vec<(BenchmarkParameter, u32, u32)>, pov_modes: Vec<(String, String)>, @@ -153,7 +152,7 @@ fn combine_batches( } /// Explains possible reasons why the metadata for the benchmarking could not be found. 
-const ERROR_API_NOT_FOUND: &'static str = "Did not find the benchmarking runtime api. \ +const ERROR_METADATA_NOT_FOUND: &'static str = "Did not find the benchmarking metadata. \ This could mean that you either did not build the node correctly with the \ `--features runtime-benchmarks` flag, or the chain spec that you are using was \ not created by a node that was compiled with the flag"; @@ -307,33 +306,6 @@ impl PalletCmd { .with_runtime_cache_size(2) .build(); - let runtime_version: sp_version::RuntimeVersion = Self::exec_state_machine( - StateMachine::new( - state, - &mut Default::default(), - &executor, - "Core_version", - &[], - &mut Self::build_extensions(executor.clone(), state.recorder()), - &runtime_code, - CallContext::Offchain, - ), - "Could not find `Core::version` runtime api.", - )?; - - let benchmark_api_version = runtime_version - .api_version( - &, - sp_runtime::generic::UncheckedExtrinsic<(), (), (), ()>, - >, - > as sp_api::RuntimeApiInfo>::ID, - ) - .ok_or_else(|| ERROR_API_NOT_FOUND)?; - let (list, storage_info): (Vec, Vec) = Self::exec_state_machine( StateMachine::new( @@ -346,7 +318,7 @@ impl PalletCmd { &runtime_code, CallContext::Offchain, ), - ERROR_API_NOT_FOUND, + ERROR_METADATA_NOT_FOUND, )?; // Use the benchmark list and the user input to determine the set of benchmarks to run. @@ -366,7 +338,7 @@ impl PalletCmd { let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; let mut failed = Vec::<(String, String)>::new(); - 'outer: for (i, SelectedBenchmark { pallet, instance, extrinsic, components, .. }) in + 'outer: for (i, SelectedBenchmark { pallet, extrinsic, components, .. 
}) in benchmarks_to_run.clone().into_iter().enumerate() { log::info!( @@ -420,31 +392,7 @@ impl PalletCmd { } all_components }; - for (s, selected_components) in all_components.iter().enumerate() { - let params = |verify: bool, repeats: u32| -> Vec { - if benchmark_api_version >= 2 { - ( - pallet.as_bytes(), - instance.as_bytes(), - extrinsic.as_bytes(), - &selected_components.clone(), - verify, - repeats, - ) - .encode() - } else { - ( - pallet.as_bytes(), - extrinsic.as_bytes(), - &selected_components.clone(), - verify, - repeats, - ) - .encode() - } - }; - // First we run a verification if !self.no_verify { let state = &state_without_tracking; @@ -459,7 +407,14 @@ impl PalletCmd { &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - ¶ms(true, 1), + &( + pallet.as_bytes(), + extrinsic.as_bytes(), + &selected_components.clone(), + true, // run verification code + 1, // no need to do internal repeats + ) + .encode(), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -492,7 +447,14 @@ impl PalletCmd { &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - ¶ms(false, self.repeat), + &( + pallet.as_bytes(), + extrinsic.as_bytes(), + &selected_components.clone(), + false, // don't run verification code for final values + self.repeat, + ) + .encode(), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -527,7 +489,14 @@ impl PalletCmd { &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - ¶ms(false, self.repeat), + &( + pallet.as_bytes(), + extrinsic.as_bytes(), + &selected_components.clone(), + false, // don't run verification code for final values + self.repeat, + ) + .encode(), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -602,7 +571,6 @@ impl PalletCmd { { benchmarks_to_run.push(( item.pallet.clone(), - item.instance.clone(), benchmark.name.clone(), 
benchmark.components.clone(), benchmark.pov_modes.clone(), @@ -613,15 +581,13 @@ impl PalletCmd { // Convert `Vec` to `String` for better readability. let benchmarks_to_run: Vec<_> = benchmarks_to_run .into_iter() - .map(|(pallet, instance, extrinsic, components, pov_modes)| { - let pallet = String::from_utf8(pallet).expect("Encoded from String; qed"); - let instance = String::from_utf8(instance).expect("Encoded from String; qed"); + .map(|(pallet, extrinsic, components, pov_modes)| { + let pallet = String::from_utf8(pallet.clone()).expect("Encoded from String; qed"); let extrinsic = String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"); SelectedBenchmark { pallet, - instance, extrinsic, components, pov_modes: pov_modes diff --git a/substrate/utils/frame/generate-bags/Cargo.toml b/substrate/utils/frame/generate-bags/Cargo.toml index c03f85ece05d..c37c42646699 100644 --- a/substrate/utils/frame/generate-bags/Cargo.toml +++ b/substrate/utils/frame/generate-bags/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # FRAME -frame-election-provider-support = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index aace0f4ad23f..3d5748647257 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -13,8 +13,8 @@ publish = false workspace = true [dependencies] -generate-bags = { workspace = true, default-features = true } kitchensink-runtime = { workspace = true } +generate-bags = { workspace = true, 
default-features = true } # third-party clap = { features = ["derive"], workspace = true } diff --git a/substrate/utils/frame/omni-bencher/Cargo.toml b/substrate/utils/frame/omni-bencher/Cargo.toml index d0d7f1a3428f..345a7288d45b 100644 --- a/substrate/utils/frame/omni-bencher/Cargo.toml +++ b/substrate/utils/frame/omni-bencher/Cargo.toml @@ -15,16 +15,16 @@ workspace = true clap = { features = ["derive"], workspace = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } frame-benchmarking-cli = { workspace = true } -log = { workspace = true } sc-cli = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } tracing-subscriber = { workspace = true } +log = { workspace = true } [dev-dependencies] +tempfile = { workspace = true } assert_cmd = { workspace = true } cumulus-test-runtime = { workspace = true } -sc-chain-spec = { workspace = true } -sp-genesis-builder = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -tempfile = { workspace = true } +sp-genesis-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true } diff --git a/substrate/utils/frame/omni-bencher/src/main.rs b/substrate/utils/frame/omni-bencher/src/main.rs index f0f9ab753b07..7d8aa891dc4a 100644 --- a/substrate/utils/frame/omni-bencher/src/main.rs +++ b/substrate/utils/frame/omni-bencher/src/main.rs @@ -24,6 +24,8 @@ use tracing_subscriber::EnvFilter; fn main() -> Result<()> { setup_logger(); + log::warn!("The FRAME omni-bencher is not yet battle tested - double check the results.",); + command::Command::parse().run() } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 4ed0e1edf3e4..41a0091027c1 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ 
b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -15,20 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { workspace = true, default-features = true } -futures = { workspace = true } -indicatif = { workspace = true } jsonrpsee = { features = ["http-client"], workspace = true } +codec = { workspace = true, default-features = true } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -spinners = { workspace = true } -substrate-rpc-client = { workspace = true, default-features = true } tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +futures = { workspace = true } +indicatif = { workspace = true } +spinners = { workspace = true } tokio-retry = { workspace = true } [dev-dependencies] diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 4c49663260bb..75a2ac2aef41 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -20,8 +20,6 @@ //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate //! based chain, or a local state snapshot file. 
-mod logging; - use codec::{Compact, Decode, Encode}; use indicatif::{ProgressBar, ProgressStyle}; use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient}; @@ -39,6 +37,7 @@ use sp_runtime::{ StateVersion, }; use sp_state_machine::TestExternalities; +use spinners::{Spinner, Spinners}; use std::{ cmp::{max, min}, fs, @@ -50,8 +49,6 @@ use std::{ use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi}; use tokio_retry::{strategy::FixedInterval, Retry}; -type Result = std::result::Result; - type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; type ChildKeyValues = Vec<(ChildInfo, Vec)>; @@ -90,7 +87,7 @@ impl Snapshot { } } - fn load(path: &PathBuf) -> Result> { + fn load(path: &PathBuf) -> Result, &'static str> { let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; // The first item in the SCALE encoded struct bytes is the snapshot version. We decode and // check that first, before proceeding to decode the rest of the snapshot. @@ -171,9 +168,9 @@ impl Transport { } // Build an HttpClient from a URI. - async fn init(&mut self) -> Result<()> { + async fn init(&mut self) -> Result<(), &'static str> { if let Self::Uri(uri) = self { - debug!(target: LOG_TARGET, "initializing remote client to {uri:?}"); + log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); // If we have a ws uri, try to convert it to an http uri. // We use an HTTP client rather than WS because WS starts to choke with "accumulated @@ -181,11 +178,11 @@ impl Transport { // from a node running a default configuration. 
let uri = if uri.starts_with("ws://") { let uri = uri.replace("ws://", "http://"); - info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)"); + log::info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); uri } else if uri.starts_with("wss://") { let uri = uri.replace("wss://", "https://"); - info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)"); + log::info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); uri } else { uri.clone() @@ -196,7 +193,7 @@ impl Transport { .request_timeout(std::time::Duration::from_secs(60 * 5)) .build(uri) .map_err(|e| { - error!(target: LOG_TARGET, "error: {e:?}"); + log::error!(target: LOG_TARGET, "error: {:?}", e); "failed to build http client" })?; @@ -367,23 +364,23 @@ where &self, key: StorageKey, maybe_at: Option, - ) -> Result> { + ) -> Result, &'static str> { trace!(target: LOG_TARGET, "rpc: get_storage"); self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {e:?}"); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc get_storage failed." }) } /// Get the latest finalized head. - async fn rpc_get_head(&self) -> Result { + async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); // sadly this pretty much unreadable... 
ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client()) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {e:?}"); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc finalized_head failed." }) } @@ -393,13 +390,13 @@ where prefix: Option, start_key: Option, at: B::Hash, - ) -> Result> { + ) -> Result, &'static str> { self.as_online() .rpc_client() .storage_keys_paged(prefix, Self::DEFAULT_KEY_DOWNLOAD_PAGE, start_key, Some(at)) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {e:?}"); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc get_keys failed" }) } @@ -410,7 +407,7 @@ where prefix: &StorageKey, block: B::Hash, parallel: usize, - ) -> Result> { + ) -> Result, &'static str> { /// Divide the workload and return the start key of each chunks. Guaranteed to return a /// non-empty list. fn gen_start_keys(prefix: &StorageKey) -> Vec { @@ -494,7 +491,7 @@ where block: B::Hash, start_key: Option<&StorageKey>, end_key: Option<&StorageKey>, - ) -> Result> { + ) -> Result, &'static str> { let mut last_key: Option<&StorageKey> = start_key; let mut keys: Vec = vec![]; @@ -521,11 +518,11 @@ where // scraping out of range or no more matches, // we are done either way if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { - debug!(target: LOG_TARGET, "last page received: {page_len}"); + log::debug!(target: LOG_TARGET, "last page received: {}", page_len); break } - debug!( + log::debug!( target: LOG_TARGET, "new total = {}, full page received: {}", keys.len(), @@ -592,10 +589,11 @@ where let total_payloads = payloads.len(); while start_index < total_payloads { - debug!( + log::debug!( target: LOG_TARGET, - "Remaining payloads: {} Batch request size: {batch_size}", + "Remaining payloads: {} Batch request size: {}", total_payloads - start_index, + batch_size, ); let end_index = usize::min(start_index + batch_size, total_payloads); @@ -622,16 +620,18 @@ where retries += 1; let failure_log = format!( - "Batch request failed 
({retries}/{} retries). Error: {e}", - Self::MAX_RETRIES + "Batch request failed ({}/{} retries). Error: {}", + retries, + Self::MAX_RETRIES, + e ); // after 2 subsequent failures something very wrong is happening. log a warning // and reset the batch size down to 1. if retries >= 2 { - warn!("{failure_log}"); + log::warn!("{}", failure_log); batch_size = 1; } else { - debug!("{failure_log}"); + log::debug!("{}", failure_log); // Decrease batch size by DECREASE_FACTOR batch_size = (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize; @@ -655,11 +655,13 @@ where ) }; - debug!( + log::debug!( target: LOG_TARGET, - "Request duration: {request_duration:?} Target duration: {:?} Last batch size: {} Next batch size: {batch_size}", + "Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}", + request_duration, Self::REQUEST_DURATION_TARGET, end_index - start_index, + batch_size ); let batch_response_len = batch_response.len(); @@ -687,24 +689,21 @@ where prefix: StorageKey, at: B::Hash, pending_ext: &mut TestExternalities>, - ) -> Result> { - let keys = logging::with_elapsed_async( - || async { - // TODO: We could start downloading when having collected the first batch of keys. - // https://github.com/paritytech/polkadot-sdk/issues/2494 - let keys = self - .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) - .await? - .into_iter() - .collect::>(); - - Ok(keys) - }, - "Scraping keys...", - |keys| format!("Found {} keys", keys.len()), - ) - .await?; - + ) -> Result, &'static str> { + let start = Instant::now(); + let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into()); + // TODO We could start downloading when having collected the first batch of keys + // https://github.com/paritytech/polkadot-sdk/issues/2494 + let keys = self + .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) + .await? 
+ .into_iter() + .collect::>(); + sp.stop_with_message(format!( + "✅ Found {} keys ({:.2}s)", + keys.len(), + start.elapsed().as_secs_f32() + )); if keys.is_empty() { return Ok(Default::default()) } @@ -736,7 +735,7 @@ where let storage_data = match storage_data_result { Ok(storage_data) => storage_data.into_iter().flatten().collect::>(), Err(e) => { - error!(target: LOG_TARGET, "Error while getting storage data: {e}"); + log::error!(target: LOG_TARGET, "Error while getting storage data: {}", e); return Err("Error while getting storage data") }, }; @@ -752,31 +751,27 @@ where .map(|(key, maybe_value)| match maybe_value { Some(data) => (key.clone(), data), None => { - warn!(target: LOG_TARGET, "key {key:?} had none corresponding value."); + log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key); let data = StorageData(vec![]); (key.clone(), data) }, }) .collect::>(); - logging::with_elapsed( - || { - pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { - // Don't insert the child keys here, they need to be inserted separately with - // all their data in the load_child_remote function. - match is_default_child_storage_key(&k.0) { - true => None, - false => Some((k.0, v.0)), - } - })); - - Ok(()) - }, - "Inserting keys into DB...", - |_| "Inserted keys into DB".into(), - ) - .expect("must succeed; qed"); - + let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); + let start = Instant::now(); + pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { + // Don't insert the child keys here, they need to be inserted separately with all their + // data in the load_child_remote function. 
+ match is_default_child_storage_key(&k.0) { + true => None, + false => Some((k.0, v.0)), + } + })); + sp.stop_with_message(format!( + "✅ Inserted keys into DB ({:.2}s)", + start.elapsed().as_secs_f32() + )); Ok(key_values) } @@ -786,7 +781,7 @@ where prefixed_top_key: &StorageKey, child_keys: Vec, at: B::Hash, - ) -> Result> { + ) -> Result, &'static str> { let child_keys_len = child_keys.len(); let payloads = child_keys @@ -808,7 +803,7 @@ where match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await { Ok(storage_data) => storage_data, Err(e) => { - error!(target: LOG_TARGET, "batch processing failed: {e:?}"); + log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e); return Err("batch processing failed") }, }; @@ -821,7 +816,7 @@ where .map(|(key, maybe_value)| match maybe_value { Some(v) => (key.clone(), v), None => { - warn!(target: LOG_TARGET, "key {key:?} had no corresponding value."); + log::warn!(target: LOG_TARGET, "key {:?} had no corresponding value.", &key); (key.clone(), StorageData(vec![])) }, }) @@ -833,7 +828,7 @@ where prefixed_top_key: &StorageKey, child_prefix: StorageKey, at: B::Hash, - ) -> Result> { + ) -> Result, &'static str> { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let mut all_child_keys = Vec::new(); @@ -855,7 +850,7 @@ where let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {e:?}"); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc child_get_keys failed." 
})?; @@ -901,7 +896,7 @@ where &self, top_kv: &[KeyValue], pending_ext: &mut TestExternalities>, - ) -> Result { + ) -> Result { let child_roots = top_kv .iter() .filter(|(k, _)| is_default_child_storage_key(k.as_ref())) @@ -909,7 +904,7 @@ where .collect::>(); if child_roots.is_empty() { - info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape"); + info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape",); return Ok(Default::default()) } @@ -935,7 +930,7 @@ where let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) { Some((ChildType::ParentKeyId, storage_key)) => storage_key, None => { - error!(target: LOG_TARGET, "invalid key: {prefixed_top_key:?}"); + log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); return Err("Invalid child key") }, }; @@ -959,13 +954,13 @@ where async fn load_top_remote( &self, pending_ext: &mut TestExternalities>, - ) -> Result { + ) -> Result { let config = self.as_online(); let at = self .as_online() .at .expect("online config must be initialized by this point; qed."); - info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {at:?}"); + log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at); let mut keys_and_values = Vec::new(); for prefix in &config.hashed_prefixes { @@ -973,7 +968,7 @@ where let additional_key_values = self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?; let elapsed = now.elapsed(); - info!( + log::info!( target: LOG_TARGET, "adding data for hashed prefix: {:?}, took {:.2}s", HexDisplay::from(prefix), @@ -984,7 +979,7 @@ where for key in &config.hashed_keys { let key = StorageKey(key.to_vec()); - info!( + log::info!( target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key) @@ -995,7 +990,7 @@ where keys_and_values.push((key, value)); }, None => { - warn!( + log::warn!( target: LOG_TARGET, "no data found for hashed key: {:?}", HexDisplay::from(&key) @@ -1010,16 +1005,17 @@ 
where /// The entry point of execution, if `mode` is online. /// /// initializes the remote client in `transport`, and sets the `at` field, if not specified. - async fn init_remote_client(&mut self) -> Result<()> { + async fn init_remote_client(&mut self) -> Result<(), &'static str> { // First, initialize the http client. self.as_online_mut().transport.init().await?; // Then, if `at` is not set, set it. if self.as_online().at.is_none() { let at = self.rpc_get_head().await?; - info!( + log::info!( target: LOG_TARGET, - "since no at is provided, setting it to latest finalized head, {at:?}", + "since no at is provided, setting it to latest finalized head, {:?}", + at ); self.as_online_mut().at = Some(at); } @@ -1044,7 +1040,7 @@ where .filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX) .count() == 0 { - info!( + log::info!( target: LOG_TARGET, "since no prefix is filtered, the data for all pallets will be downloaded" ); @@ -1054,7 +1050,7 @@ where Ok(()) } - async fn load_header(&self) -> Result { + async fn load_header(&self) -> Result { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let get_header_closure = || { @@ -1073,12 +1069,14 @@ where /// `load_child_remote`. /// /// Must be called after `init_remote_client`. - async fn load_remote_and_maybe_save(&mut self) -> Result>> { + async fn load_remote_and_maybe_save( + &mut self, + ) -> Result>, &'static str> { let state_version = StateApi::::runtime_version(self.as_online().rpc_client(), None) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {e:?}"); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc runtime_version failed." 
}) .map(|v| v.state_version())?; @@ -1102,10 +1100,11 @@ where self.load_header().await?, ); let encoded = snapshot.encode(); - info!( + log::info!( target: LOG_TARGET, - "writing snapshot of {} bytes to {path:?}", + "writing snapshot of {} bytes to {:?}", encoded.len(), + path ); std::fs::write(path, encoded).map_err(|_| "fs::write failed")?; @@ -1120,35 +1119,33 @@ where Ok(pending_ext) } - async fn do_load_remote(&mut self) -> Result> { + async fn do_load_remote(&mut self) -> Result, &'static str> { self.init_remote_client().await?; let inner_ext = self.load_remote_and_maybe_save().await?; Ok(RemoteExternalities { header: self.load_header().await?, inner_ext }) } - fn do_load_offline(&mut self, config: OfflineConfig) -> Result> { - let (header, inner_ext) = logging::with_elapsed( - || { - info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); - - let Snapshot { header, state_version, raw_storage, storage_root, .. } = - Snapshot::::load(&config.state_snapshot.path)?; - let inner_ext = TestExternalities::from_raw_snapshot( - raw_storage, - storage_root, - self.overwrite_state_version.unwrap_or(state_version), - ); - - Ok((header, inner_ext)) - }, - "Loading snapshot...", - |_| "Loaded snapshot".into(), - )?; + fn do_load_offline( + &mut self, + config: OfflineConfig, + ) -> Result, &'static str> { + let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into()); + let start = Instant::now(); + info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); + let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } = + Snapshot::::load(&config.state_snapshot.path)?; + + let inner_ext = TestExternalities::from_raw_snapshot( + raw_storage, + storage_root, + self.overwrite_state_version.unwrap_or(state_version), + ); + sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32())); Ok(RemoteExternalities { inner_ext, header }) } - pub(crate) 
async fn pre_build(mut self) -> Result> { + pub(crate) async fn pre_build(mut self) -> Result, &'static str> { let mut ext = match self.mode.clone() { Mode::Offline(config) => self.do_load_offline(config)?, Mode::Online(_) => self.do_load_remote().await?, @@ -1162,7 +1159,7 @@ where // inject manual key values. if !self.hashed_key_values.is_empty() { - info!( + log::info!( target: LOG_TARGET, "extending externalities with {} manually injected key-values", self.hashed_key_values.len() @@ -1172,7 +1169,7 @@ where // exclude manual key values. if !self.hashed_blacklist.is_empty() { - info!( + log::info!( target: LOG_TARGET, "excluding externalities from {} keys", self.hashed_blacklist.len() @@ -1224,7 +1221,7 @@ where self } - pub async fn build(self) -> Result> { + pub async fn build(self) -> Result, &'static str> { let mut ext = self.pre_build().await?; ext.commit_all().unwrap(); diff --git a/substrate/utils/frame/remote-externalities/src/logging.rs b/substrate/utils/frame/remote-externalities/src/logging.rs deleted file mode 100644 index 7ab901c004de..000000000000 --- a/substrate/utils/frame/remote-externalities/src/logging.rs +++ /dev/null @@ -1,86 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{ - future::Future, - io::{self, IsTerminal}, - time::Instant, -}; - -use spinners::{Spinner, Spinners}; - -use super::Result; - -// A simple helper to time a operation with a nice spinner, start message, and end message. -// -// The spinner is only displayed when stdout is a terminal. -pub(super) fn with_elapsed(f: F, start_msg: &str, end_msg: EndMsg) -> Result -where - F: FnOnce() -> Result, - EndMsg: FnOnce(&R) -> String, -{ - let timer = Instant::now(); - let mut maybe_sp = start(start_msg); - - Ok(end(f()?, timer, maybe_sp.as_mut(), end_msg)) -} - -// A simple helper to time an async operation with a nice spinner, start message, and end message. -// -// The spinner is only displayed when stdout is a terminal. -pub(super) async fn with_elapsed_async( - f: F, - start_msg: &str, - end_msg: EndMsg, -) -> Result -where - F: FnOnce() -> Fut, - Fut: Future>, - EndMsg: FnOnce(&R) -> String, -{ - let timer = Instant::now(); - let mut maybe_sp = start(start_msg); - - Ok(end(f().await?, timer, maybe_sp.as_mut(), end_msg)) -} - -fn start(start_msg: &str) -> Option { - let msg = format!("⏳ {start_msg}"); - - if io::stdout().is_terminal() { - Some(Spinner::new(Spinners::Dots, msg)) - } else { - println!("{msg}"); - - None - } -} - -fn end(val: T, timer: Instant, maybe_sp: Option<&mut Spinner>, end_msg: EndMsg) -> T -where - EndMsg: FnOnce(&T) -> String, -{ - let msg = format!("✅ {} in {:.2}s", end_msg(&val), timer.elapsed().as_secs_f32()); - - if let Some(sp) = maybe_sp { - sp.stop_with_message(msg); - } else { - println!("{msg}"); - } - - val -} diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index 6282621e1c75..d26be3a13124 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { workspace = true } jsonrpsee = { features = ["ws-client"], workspace = 
true } -log = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } +async-trait = { workspace = true } serde = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } tokio = { features = ["macros", "rt-multi-thread", "sync"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 45b2bc6fa9b3..82652c8fa262 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } jsonrpsee = { features = ["jsonrpsee-types"], workspace = true } -sc-rpc-api = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } [dev-dependencies] -frame-system = { workspace = true, default-features = true } -jsonrpsee = { features = ["jsonrpsee-types", "ws-client"], workspace = true } scale-info = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types", "ws-client"], workspace = true } +tokio = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -tokio = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml 
b/substrate/utils/frame/rpc/system/Cargo.toml index 68dfbb833c6f..5757a48498c7 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +futures = { workspace = true } codec = { workspace = true, default-features = true } docify = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true, default-features = true } -futures = { workspace = true } jsonrpsee = { features = [ "client-core", "macros", "server-core", ], workspace = true } log = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } @@ -35,8 +35,8 @@ sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = { workspace = true } sc-transaction-pool = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +assert_matches = { workspace = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -tokio = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index e1b3994c03dd..824c871a3562 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -224,7 +224,7 @@ mod tests { transaction_validity::{InvalidTransaction, TransactionValidityError}, ApplyExtrinsicResult, }; - use substrate_test_runtime_client::{runtime::Transfer, Sr25519Keyring}; + use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; fn deny_unsafe() -> Extensions { let mut ext = 
Extensions::new(); @@ -256,8 +256,8 @@ mod tests { let source = sp_runtime::transaction_validity::TransactionSource::External; let new_transaction = |nonce: u64| { let t = Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 5, nonce, }; @@ -273,7 +273,7 @@ mod tests { let accounts = System::new(client, pool); // when - let nonce = accounts.nonce(Sr25519Keyring::Alice.into()).await; + let nonce = accounts.nonce(AccountKeyring::Alice.into()).await; // then assert_eq!(nonce.unwrap(), 2); @@ -321,8 +321,8 @@ mod tests { let accounts = System::new(client, pool); let tx = Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 5, nonce: 0, } @@ -357,8 +357,8 @@ mod tests { let accounts = System::new(client, pool); let tx = Transfer { - from: Sr25519Keyring::Alice.into(), - to: Sr25519Keyring::Bob.into(), + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), amount: 5, nonce: 100, } diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index b8dfd6fb2bee..9bdec3cb8183 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] http-body-util = { workspace = true } hyper = { features = ["http1", "server"], workspace = true } -hyper-util = { features = ["server-auto", "server-graceful", "tokio"], workspace = true } +hyper-util = { features = ["server-auto", "tokio"], workspace = true } log = { workspace = true, default-features = true } prometheus = { workspace = true } thiserror = { workspace = true } diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index ae39cb4a7dd3..35597cad03d8 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ 
b/substrate/utils/prometheus/src/lib.rs @@ -87,7 +87,7 @@ async fn request_metrics( /// to serve metrics. pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error> { let listener = tokio::net::TcpListener::bind(&prometheus_addr).await.map_err(|e| { - log::error!(target: "prometheus", "Error binding to '{prometheus_addr:?}': {e:?}"); + log::error!(target: "prometheus", "Error binding to '{:#?}': {:#?}", prometheus_addr, e); Error::PortInUse(prometheus_addr) })?; @@ -102,7 +102,6 @@ async fn init_prometheus_with_listener( log::info!(target: "prometheus", "〽️ Prometheus exporter started at {}", listener.local_addr()?); let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); - let graceful = hyper_util::server::graceful::GracefulShutdown::new(); loop { let io = match listener.accept().await { @@ -121,7 +120,6 @@ async fn init_prometheus_with_listener( hyper::service::service_fn(move |req| request_metrics(req, registry.clone())), ) .into_owned(); - let conn = graceful.watch(conn); tokio::spawn(async move { if let Err(err) = conn.await { diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 6645dd1803bf..8f0e8a23e54a 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -18,28 +18,28 @@ targets = ["x86_64-unknown-linux-gnu"] build-helper = { workspace = true } cargo_metadata = { workspace = true } console = { workspace = true } -filetime = { workspace = true } -jobserver = { workspace = true } -parity-wasm = { workspace = true } -polkavm-linker = { workspace = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } strum = { features = ["derive"], workspace = true, default-features = true } tempfile = { workspace = true } toml = { workspace = true } walkdir = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +filetime = { 
workspace = true } wasm-opt = { workspace = true } +parity-wasm = { workspace = true } +polkavm-linker = { workspace = true } +jobserver = { workspace = true } # Dependencies required for the `metadata-hash` feature. -array-bytes = { optional = true, workspace = true, default-features = true } -codec = { optional = true, workspace = true, default-features = true } -frame-metadata = { features = ["current", "unstable"], optional = true, workspace = true, default-features = true } merkleized-metadata = { optional = true, workspace = true } sc-executor = { optional = true, workspace = true, default-features = true } -shlex = { workspace = true } sp-core = { optional = true, workspace = true, default-features = true } sp-io = { optional = true, workspace = true, default-features = true } -sp-tracing = { optional = true, workspace = true, default-features = true } sp-version = { optional = true, workspace = true, default-features = true } +frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true } +codec = { optional = true, workspace = true, default-features = true } +array-bytes = { optional = true, workspace = true, default-features = true } +sp-tracing = { optional = true, workspace = true, default-features = true } +shlex = { workspace = true } [features] # Enable support for generating the metadata hash. diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index 5bdc743eac31..a40aafe1d812 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -235,8 +235,7 @@ impl WasmBuilder { /// Build the WASM binary. 
pub fn build(mut self) { - let target = RuntimeTarget::new(); - + let target = crate::runtime_target(); if target == RuntimeTarget::Wasm { if self.export_heap_base { self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index ce90f492e08f..420ecd63e1dc 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -112,6 +112,7 @@ //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. use std::{ + collections::BTreeSet, env, fs, io::BufRead, path::{Path, PathBuf}, @@ -253,22 +254,26 @@ struct CargoCommand { program: String, args: Vec, version: Option, + target_list: Option>, } impl CargoCommand { fn new(program: &str) -> Self { let version = Self::extract_version(program, &[]); + let target_list = Self::extract_target_list(program, &[]); - CargoCommand { program: program.into(), args: Vec::new(), version } + CargoCommand { program: program.into(), args: Vec::new(), version, target_list } } fn new_with_args(program: &str, args: &[&str]) -> Self { let version = Self::extract_version(program, args); + let target_list = Self::extract_target_list(program, args); CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), version, + target_list, } } @@ -289,6 +294,23 @@ impl CargoCommand { Version::extract(&version) } + fn extract_target_list(program: &str, args: &[&str]) -> Option> { + // This is technically an unstable option, but we don't care because we only need this + // to build RISC-V runtimes, and those currently require a specific nightly toolchain + // anyway, so it's totally fine for this to fail in other cases. + let list = Command::new(program) + .args(args) + .args(&["rustc", "-Z", "unstable-options", "--print", "target-list"]) + // Make sure if we're called from within a `build.rs` the host toolchain won't override + // a rustup toolchain we've picked. 
+ .env_remove("RUSTC") + .output() + .ok() + .and_then(|o| String::from_utf8(o.stdout).ok())?; + + Some(list.trim().split("\n").map(ToString::to_string).collect()) + } + /// Returns the version of this cargo command or `None` if it failed to extract the version. fn version(&self) -> Option { self.version @@ -304,10 +326,19 @@ impl CargoCommand { fn supports_substrate_runtime_env(&self, target: RuntimeTarget) -> bool { match target { RuntimeTarget::Wasm => self.supports_substrate_runtime_env_wasm(), - RuntimeTarget::Riscv => true, + RuntimeTarget::Riscv => self.supports_substrate_runtime_env_riscv(), } } + /// Check if the supplied cargo command supports our RISC-V runtime environment. + fn supports_substrate_runtime_env_riscv(&self) -> bool { + let Some(target_list) = self.target_list.as_ref() else { return false }; + // This is our custom target which currently doesn't exist on any upstream toolchain, + // so if it exists it's guaranteed to be our custom toolchain and have have everything + // we need, so any further version checks are unnecessary at this point. + target_list.contains("riscv32ema-unknown-none-elf") + } + /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -378,6 +409,13 @@ fn get_bool_environment_variable(name: &str) -> Option { } } +/// Returns whether we need to also compile the standard library when compiling the runtime. +fn build_std_required() -> bool { + let default = runtime_target() == RuntimeTarget::Wasm; + + crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(default) +} + #[derive(Copy, Clone, PartialEq, Eq)] enum RuntimeTarget { Wasm, @@ -385,55 +423,36 @@ enum RuntimeTarget { } impl RuntimeTarget { - /// Creates a new instance. 
- fn new() -> Self { - let Some(value) = env::var_os(RUNTIME_TARGET) else { - return Self::Wasm; - }; - - if value == "wasm" { - Self::Wasm - } else if value == "riscv" { - Self::Riscv - } else { - build_helper::warning!( - "RUNTIME_TARGET environment variable must be set to either \"wasm\" or \"riscv\"" - ); - std::process::exit(1); - } - } - - /// Figures out the target parameter value for rustc. - fn rustc_target(self) -> String { + fn rustc_target(self) -> &'static str { match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(), - RuntimeTarget::Riscv => { - let path = polkavm_linker::target_json_32_path().expect("riscv not found"); - path.into_os_string().into_string().unwrap() - }, + RuntimeTarget::Wasm => "wasm32-unknown-unknown", + RuntimeTarget::Riscv => "riscv32ema-unknown-none-elf", } } - /// Figures out the target directory name used by cargo. - fn rustc_target_dir(self) -> &'static str { + fn build_subdirectory(self) -> &'static str { + // Keep the build directories separate so that when switching between + // the targets we won't trigger unnecessary rebuilds. match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown", - RuntimeTarget::Riscv => "riscv32emac-unknown-none-polkavm", + RuntimeTarget::Wasm => "wbuild", + RuntimeTarget::Riscv => "rbuild", } } +} - /// Figures out the build-std argument. - fn rustc_target_build_std(self) -> Option<&'static str> { - if !crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) { - return None; - } - - // This is a nightly-only flag. 
- let arg = match self { - RuntimeTarget::Wasm => "build-std", - RuntimeTarget::Riscv => "build-std=core,alloc", - }; +fn runtime_target() -> RuntimeTarget { + let Some(value) = env::var_os(RUNTIME_TARGET) else { + return RuntimeTarget::Wasm; + }; - Some(arg) + if value == "wasm" { + RuntimeTarget::Wasm + } else if value == "riscv" { + RuntimeTarget::Riscv + } else { + build_helper::warning!( + "the '{RUNTIME_TARGET}' environment variable has an invalid value; it must be either 'wasm' or 'riscv'" + ); + std::process::exit(1); } } diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 9abfd1725237..4de6b87f618d 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -196,14 +196,11 @@ fn check_wasm_toolchain_installed( error, colorize_aux_message(&"-".repeat(60)), )) - }; + } } let version = dummy_crate.get_rustc_version(); - - let target = RuntimeTarget::new(); - assert!(target == RuntimeTarget::Wasm); - if target.rustc_target_build_std().is_some() { + if crate::build_std_required() { if let Some(sysroot) = dummy_crate.get_sysroot() { let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 6530e4c22fb9..26edd2ea1f22 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -109,15 +109,6 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { crate_metadata } -/// Keep the build directories separate so that when switching between the -/// targets we won't trigger unnecessary rebuilds. 
-fn build_subdirectory(target: RuntimeTarget) -> &'static str { - match target { - RuntimeTarget::Wasm => "wbuild", - RuntimeTarget::Riscv => "rbuild", - } -} - /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. /// /// # Returns @@ -134,7 +125,7 @@ pub(crate) fn create_and_compile( #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); - let runtime_workspace = runtime_workspace_root.join(build_subdirectory(target)); + let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); let crate_metadata = crate_metadata(orig_project_cargo_toml); @@ -779,7 +770,7 @@ impl BuildConfiguration { .collect::>() .iter() .rev() - .take_while(|c| c.as_os_str() != build_subdirectory(target)) + .take_while(|c| c.as_os_str() != target.build_subdirectory()) .last() .expect("We put the runtime project within a `target/.../[rw]build` path; qed") .as_os_str() @@ -850,7 +841,9 @@ fn build_bloaty_blob( "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table ", ); }, - RuntimeTarget::Riscv => (), + RuntimeTarget::Riscv => { + rustflags.push_str("-C target-feature=+lui-addi-fusion -C relocation-model=pie -C link-arg=--emit-relocs -C link-arg=--unique "); + }, } rustflags.push_str(default_rustflags); @@ -914,9 +907,10 @@ fn build_bloaty_blob( // // So here we force the compiler to also compile the standard library crates for us // to make sure that they also only use the MVP features. - if let Some(arg) = target.rustc_target_build_std() { - build_cmd.arg("-Z").arg(arg); - + if crate::build_std_required() { + // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used + // so it's unlikely to break without a replacement. 
+ build_cmd.arg("-Z").arg("build-std"); if !cargo_cmd.supports_nightly_features() { build_cmd.env("RUSTC_BOOTSTRAP", "1"); } @@ -940,7 +934,7 @@ fn build_bloaty_blob( let blob_name = get_blob_name(target, &manifest_path); let target_directory = project .join("target") - .join(target.rustc_target_dir()) + .join(target.rustc_target()) .join(blob_build_profile.directory()); match target { RuntimeTarget::Riscv => { @@ -974,7 +968,7 @@ fn build_bloaty_blob( }, }; - std::fs::write(&polkavm_path, program) + std::fs::write(&polkavm_path, program.as_bytes()) .expect("writing the blob to a file always works"); } diff --git a/templates/minimal/README.md b/templates/minimal/README.md index 22f396c243ef..cf43d71d8849 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -105,11 +105,12 @@ Omni Node, nonetheless. #### Run Omni Node -Start Omni Node in development mode (sets up block production and finalization based on manual seal, -sealing a new block every 3 seconds), with a minimal template runtime chain spec. +Start Omni Node with manual seal (3 seconds block times), minimal template runtime based +chain spec. We'll use `--tmp` flag to start the node with its configurations stored in a +temporary directory, which will be deleted at the end of the process. ```sh -polkadot-omni-node --chain --dev +polkadot-omni-node --chain --dev-block-time 3000 --tmp ``` ### Minimal Template Node @@ -159,7 +160,7 @@ Then make the changes in the network specification like so: # ... chain = "dev" chain_spec_path = "" -default_args = ["--dev"] +default_args = ["--dev-block-time 3000"] # .. 
``` diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index a2a999f02671..956efca34532 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -14,15 +14,15 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { features = ["derive"], workspace = true } docify = { workspace = true } +clap = { features = ["derive"], workspace = true } futures = { features = ["thread-pool"], workspace = true } futures-timer = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } serde_json = { workspace = true, default-features = true } -minimal-template-runtime = { workspace = true } polkadot-sdk = { workspace = true, features = ["experimental", "node"] } +minimal-template-runtime = { workspace = true } [build-dependencies] polkadot-sdk = { workspace = true, features = ["substrate-build-script-utils"] } diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 5988dbf3ce6e..b4e6fc0b728b 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -134,7 +134,7 @@ pub fn new_full::Ha config.prometheus_config.as_ref().map(|cfg| &cfg.registry), ); - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -264,5 +264,6 @@ pub fn new_full::Ha _ => {}, } + network_starter.start_network(); Ok(task_manager) } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index e11ce0e9955c..9a02d4daeaac 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } +scale-info = { features = 
["derive"], workspace = true } polkadot-sdk = { workspace = true, default-features = false, features = [ "experimental", "runtime", ] } -scale-info = { features = ["derive"], workspace = true } [features] diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 1554e92c0bf5..b803c74539ef 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -11,6 +11,7 @@ publish = false [dependencies] codec = { workspace = true } +scale-info = { workspace = true } polkadot-sdk = { workspace = true, features = [ "pallet-balances", "pallet-sudo", @@ -19,7 +20,6 @@ polkadot-sdk = { workspace = true, features = [ "pallet-transaction-payment-rpc-runtime-api", "runtime", ] } -scale-info = { workspace = true } serde_json = { workspace = true, default-features = false, features = ["alloc"] } # local pallet templates diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 72eded5bfd13..ecdba739c50e 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -41,7 +41,7 @@ pub mod genesis_config_presets { use super::*; use crate::{ interface::{Balance, MinimumBalance}, - sp_keyring::Sr25519Keyring, + sp_keyring::AccountKeyring, BalancesConfig, RuntimeGenesisConfig, SudoConfig, }; @@ -51,14 +51,17 @@ pub mod genesis_config_presets { /// Returns a development genesis config preset. 
pub fn development_config_genesis() -> Value { let endowment = >::get().max(1) * 1000; - frame_support::build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { - balances: Sr25519Keyring::iter() + balances: AccountKeyring::iter() .map(|a| (a.to_account_id(), endowment)) .collect::>(), }, - sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, - }) + sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Get the set of the available genesis config presets. diff --git a/templates/minimal/zombienet-omni-node.toml b/templates/minimal/zombienet-omni-node.toml index acd5b121c674..33b0fceba68c 100644 --- a/templates/minimal/zombienet-omni-node.toml +++ b/templates/minimal/zombienet-omni-node.toml @@ -2,7 +2,7 @@ default_command = "polkadot-omni-node" chain = "dev" chain_spec_path = "" -default_args = ["--dev"] +default_args = ["--dev-block-time 3000"] [[relaychain.nodes]] name = "alice" diff --git a/templates/parachain/README.md b/templates/parachain/README.md index c1e333df9e9e..65a6979041f2 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -27,7 +27,6 @@ - [Connect with the Polkadot-JS Apps Front-End](#connect-with-the-polkadot-js-apps-front-end) - [Takeaways](#takeaways) -- [Runtime development](#runtime-development) - [Contributing](#contributing) - [Getting Help](#getting-help) @@ -108,11 +107,13 @@ with the relay chain ID where this instantiation of parachain-template will conn #### Run Omni Node -Start Omni Node with the generated chain spec. We'll start it in development mode (without a relay chain config), producing -and finalizing blocks based on manual seal, configured below to seal a block with each second. +Start Omni Node with the generated chain spec. 
We'll start it in development mode (without a relay chain config), +with a temporary directory for configuration (given `--tmp`), and block production set to create a block +every second. ```bash -polkadot-omni-node --chain --dev --dev-block-time 1000 +polkadot-omni-node --chain --tmp --dev-block-time 1000 + ``` However, such a setup is not close to what would run in production, and for that we need to setup a local @@ -196,37 +197,6 @@ Development parachains: - 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. - 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts. -## Runtime development - -We recommend using [`chopsticks`](https://github.com/AcalaNetwork/chopsticks) when the focus is more on the runtime -development and `OmniNode` is enough as is. - -### Install chopsticks - -To use `chopsticks`, please install the latest version according to the installation [guide](https://github.com/AcalaNetwork/chopsticks?tab=readme-ov-file#install). - -### Build a raw chain spec - -Build the `parachain-template-runtime` as mentioned before in this guide and use `chain-spec-builder` -again but this time by passing `--raw-storage` flag: - -```sh -chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id 1000 --runtime \ - target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development -``` - -### Start `chopsticks` with the chain spec - -```sh -npx @acala-network/chopsticks@latest --chain-spec -``` - -### Alternatives - -`OmniNode` can be still used for runtime development if using the `--dev` flag, while `parachain-template-node` doesn't -support it at this moment. It can still be used to test a runtime in a full setup where it is started alongside a -relay chain network (see [Parachain Template node](#parachain-template-node) setup).
- ## Contributing - 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index c16a9f1c0a78..20175cca1dab 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -12,22 +12,22 @@ build = "build.rs" [dependencies] clap = { features = ["derive"], workspace = true } -codec = { workspace = true, default-features = true } -color-print = { workspace = true } -docify = { workspace = true } -futures = { workspace = true } -jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } +jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } serde_json = { workspace = true, default-features = true } +docify = { workspace = true } +color-print = { workspace = true } polkadot-sdk = { workspace = true, features = ["node"] } parachain-template-runtime = { workspace = true } # Substrate -prometheus-endpoint = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } [build-dependencies] polkadot-sdk = { workspace = true, features = ["substrate-build-script-utils"] } diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index 08bbec6f22d0..4ee1ceed0a97 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -8,8 +8,6 @@ use serde::{Deserialize, Serialize}; /// Specialized `ChainSpec` for the normal parachain runtime. pub type ChainSpec = sc_service::GenericChainSpec; -/// The relay chain that you want to configure this parachain to connect to. 
-pub const RELAY_CHAIN: &str = "rococo-local"; /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] @@ -38,13 +36,16 @@ pub fn development_chain_spec() -> ChainSpec { ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, + Extensions { + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! + para_id: 1000, + }, ) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) - .with_properties(properties) .build() } @@ -58,7 +59,11 @@ pub fn local_chain_spec() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, + Extensions { + relay_chain: "rococo-local".into(), + // You MUST set this to the correct network! + para_id: 1000, + }, ) .with_name("Local Testnet") .with_id("local_testnet") diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 482c806a0d27..3c312a5e5c40 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -271,7 +271,7 @@ pub async fn start_parachain_node( // NOTE: because we use Aura here explicitly, we can use `CollatorSybilResistance::Resistant` // when starting the network. 
- let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = build_network(BuildNetworkParams { parachain_config: ¶chain_config, net_config, @@ -407,5 +407,7 @@ pub async fn start_parachain_node( )?; } + start_network.start_network(); + Ok((task_manager, client)) } diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index dd3c9bd703c0..4e6c6c9dd800 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -13,17 +13,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -docify = { workspace = true } substrate-wasm-builder = { optional = true, workspace = true, default-features = true } +docify = { workspace = true } [dependencies] codec = { features = ["derive"], workspace = true } -docify = { workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } -serde_json = { workspace = true, default-features = false, features = ["alloc"] } smallvec = { workspace = true, default-features = true } +docify = { workspace = true } +serde_json = { workspace = true, default-features = false, features = ["alloc"] } # Local pallet-parachain-template = { workspace = true } diff --git a/templates/parachain/runtime/src/genesis_config_presets.rs b/templates/parachain/runtime/src/genesis_config_presets.rs index 19316a066b73..744c85083260 100644 --- a/templates/parachain/runtime/src/genesis_config_presets.rs +++ b/templates/parachain/runtime/src/genesis_config_presets.rs @@ -8,7 +8,6 @@ use alloc::{vec, vec::Vec}; use polkadot_sdk::{staging_xcm as xcm, *}; use cumulus_primitives_core::ParaId; -use frame_support::build_struct_json_patch; use parachains_common::AuraId; use serde_json::Value; use sp_genesis_builder::PresetId; @@ -16,8 +15,8 @@ use 
sp_keyring::Sr25519Keyring; /// The default XCM version to set in genesis config. const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; -/// Parachain id used for genesis config presets of parachain template. -pub const PARACHAIN_ID: u32 = 1000; +/// Parachain id used for genesis config presets of parachain template. +const PARACHAIN_ID: u32 = 1000; /// Generate the session keys from individual elements. /// @@ -32,7 +31,7 @@ fn testnet_genesis( root: AccountId, id: ParaId, ) -> Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -40,10 +39,11 @@ fn testnet_genesis( .map(|k| (k, 1u128 << 60)) .collect::>(), }, - parachain_info: ParachainInfoConfig { parachain_id: id }, + parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), candidacy_bond: EXISTENTIAL_DEPOSIT * 16, + ..Default::default() }, session: SessionConfig { keys: invulnerables @@ -56,10 +56,17 @@ fn testnet_genesis( ) }) .collect::>(), + ..Default::default() + }, + polkadot_xcm: PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, sudo: SudoConfig { key: Some(root) }, - }) + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } fn local_testnet_genesis() -> Value { diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index d1d680bed55c..78a2a960c6d9 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -35,7 +35,6 @@ use frame_support::weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; -pub use
genesis_config_presets::PARACHAIN_ID; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 90f576c88c23..4c0ab31df95e 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -17,41 +17,41 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { features = ["derive"], workspace = true } futures = { features = ["thread-pool"], workspace = true } -jsonrpsee = { features = ["server"], workspace = true } serde_json = { workspace = true, default-features = true } +jsonrpsee = { features = ["server"], workspace = true } # substrate client -sc-basic-authorship = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-consensus = { workspace = true, default-features = true } -sc-consensus-aura = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -sc-offchain = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, 
default-features = true } -sp-core = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } # substrate primitives -sp-api = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } # frame and pallets -frame-metadata-hash-extension = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } pallet-transaction-payment-rpc = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs index 79d97fbab8df..d6fcebe239f7 100644 --- a/templates/solochain/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -169,7 +169,7 @@ pub fn new_full< Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, sync_service) = + let (network, 
system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -329,5 +329,6 @@ pub fn new_full< ); } + network_starter.start_network(); Ok(task_manager) } diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 1cff982fbf3c..837849e844b1 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -23,11 +23,11 @@ scale-info = { features = [ serde_json = { workspace = true, default-features = false, features = ["alloc"] } # frame -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } # frame pallets pallet-aura = { workspace = true } @@ -46,12 +46,11 @@ sp-consensus-aura = { features = [ sp-consensus-grandpa = { features = [ "serde", ], workspace = true } +sp-keyring = { workspace = true } sp-core = { features = [ "serde", ], workspace = true } -sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } -sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { features = [ "serde", @@ -62,6 +61,7 @@ sp-transaction-pool = { workspace = true } sp-version = { features = [ "serde", ], workspace = true } +sp-genesis-builder = { workspace = true } # RPC related frame-system-rpc-runtime-api = { workspace = true } diff --git a/templates/solochain/runtime/src/genesis_config_presets.rs b/templates/solochain/runtime/src/genesis_config_presets.rs index 6af8dc9cd18a..7c444456a600 100644 --- a/templates/solochain/runtime/src/genesis_config_presets.rs +++ b/templates/solochain/runtime/src/genesis_config_presets.rs @@ -17,12 +17,11 @@ use crate::{AccountId, 
BalancesConfig, RuntimeGenesisConfig, SudoConfig}; use alloc::{vec, vec::Vec}; -use frame_support::build_struct_json_patch; use serde_json::Value; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_genesis_builder::{self, PresetId}; -use sp_keyring::Sr25519Keyring; +use sp_keyring::AccountKeyring; // Returns the genesis config presets populated with given parameters. fn testnet_genesis( @@ -30,7 +29,7 @@ fn testnet_genesis( endowed_accounts: Vec, root: AccountId, ) -> Value { - build_struct_json_patch!(RuntimeGenesisConfig { + let config = RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts .iter() @@ -43,9 +42,13 @@ fn testnet_genesis( }, grandpa: pallet_grandpa::GenesisConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect::>(), + ..Default::default() }, sudo: SudoConfig { key: Some(root) }, - }) + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") } /// Return the development genesis config. 
@@ -56,12 +59,12 @@ pub fn development_config_genesis() -> Value { sp_keyring::Ed25519Keyring::Alice.public().into(), )], vec![ - Sr25519Keyring::Alice.to_account_id(), - Sr25519Keyring::Bob.to_account_id(), - Sr25519Keyring::AliceStash.to_account_id(), - Sr25519Keyring::BobStash.to_account_id(), + AccountKeyring::Alice.to_account_id(), + AccountKeyring::Bob.to_account_id(), + AccountKeyring::AliceStash.to_account_id(), + AccountKeyring::BobStash.to_account_id(), ], - sp_keyring::Sr25519Keyring::Alice.to_account_id(), + sp_keyring::AccountKeyring::Alice.to_account_id(), ) } @@ -78,11 +81,11 @@ pub fn local_config_genesis() -> Value { sp_keyring::Ed25519Keyring::Bob.public().into(), ), ], - Sr25519Keyring::iter() - .filter(|v| v != &Sr25519Keyring::One && v != &Sr25519Keyring::Two) + AccountKeyring::iter() + .filter(|v| v != &AccountKeyring::One && v != &AccountKeyring::Two) .map(|v| v.to_account_id()) .collect::>(), - Sr25519Keyring::Alice.to_account_id(), + AccountKeyring::Alice.to_account_id(), ) } diff --git a/templates/zombienet/Cargo.toml b/templates/zombienet/Cargo.toml index 805e4ddbcee2..f29325dbe6a9 100644 --- a/templates/zombienet/Cargo.toml +++ b/templates/zombienet/Cargo.toml @@ -10,10 +10,10 @@ edition.workspace = true publish = false [dependencies] -anyhow = { workspace = true } env_logger = { workspace = true } log = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread"] } +anyhow = { workspace = true } zombienet-sdk = { workspace = true } [features] diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index d2a47ade7f87..7f50658c4e16 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -120,6 +120,7 @@ std = [ "pallet-recovery?/std", "pallet-referenda?/std", "pallet-remark?/std", + "pallet-revive-fixtures?/std", "pallet-revive-mock-network?/std", "pallet-revive?/std", "pallet-root-offences?/std", @@ -290,6 +291,7 @@ runtime-benchmarks = [ "pallet-membership?/runtime-benchmarks", 
"pallet-message-queue?/runtime-benchmarks", "pallet-migrations?/runtime-benchmarks", + "pallet-mixnet?/runtime-benchmarks", "pallet-mmr?/runtime-benchmarks", "pallet-multisig?/runtime-benchmarks", "pallet-nft-fractionalization?/runtime-benchmarks", @@ -361,7 +363,6 @@ runtime-benchmarks = [ "staging-node-inspect?/runtime-benchmarks", "staging-xcm-builder?/runtime-benchmarks", "staging-xcm-executor?/runtime-benchmarks", - "staging-xcm?/runtime-benchmarks", "xcm-runtime-apis?/runtime-benchmarks", ] try-runtime = [ @@ -540,7 +541,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", 
"pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", 
"pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", 
"staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", 
"pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", 
"polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -604,7 +605,7 @@ runtime = [ "sp-wasm-interface", "sp-weights", ] -node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", 
"cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-eth-rpc", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", 
"polkadot-omni-node-lib", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", 
"cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-eth-rpc", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-omni-node-lib", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", 
"sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", "frame-support?/tuples-96", @@ -616,1891 +617,1885 @@ workspace = true [package.authors] workspace = true -[package.homepage] -workspace = true - -[package.repository] -workspace = true - [dependencies.assets-common] +path = "../cumulus/parachains/runtimes/assets/common" default-features = false optional = true -path = "../cumulus/parachains/runtimes/assets/common" 
[dependencies.binary-merkle-tree] +path = "../substrate/utils/binary-merkle-tree" default-features = false optional = true -path = "../substrate/utils/binary-merkle-tree" [dependencies.bp-header-chain] +path = "../bridges/primitives/header-chain" default-features = false optional = true -path = "../bridges/primitives/header-chain" [dependencies.bp-messages] +path = "../bridges/primitives/messages" default-features = false optional = true -path = "../bridges/primitives/messages" [dependencies.bp-parachains] +path = "../bridges/primitives/parachains" default-features = false optional = true -path = "../bridges/primitives/parachains" [dependencies.bp-polkadot] +path = "../bridges/chains/chain-polkadot" default-features = false optional = true -path = "../bridges/chains/chain-polkadot" [dependencies.bp-polkadot-core] +path = "../bridges/primitives/polkadot-core" default-features = false optional = true -path = "../bridges/primitives/polkadot-core" [dependencies.bp-relayers] +path = "../bridges/primitives/relayers" default-features = false optional = true -path = "../bridges/primitives/relayers" [dependencies.bp-runtime] +path = "../bridges/primitives/runtime" default-features = false optional = true -path = "../bridges/primitives/runtime" [dependencies.bp-test-utils] +path = "../bridges/primitives/test-utils" default-features = false optional = true -path = "../bridges/primitives/test-utils" [dependencies.bp-xcm-bridge-hub] +path = "../bridges/primitives/xcm-bridge-hub" default-features = false optional = true -path = "../bridges/primitives/xcm-bridge-hub" [dependencies.bp-xcm-bridge-hub-router] +path = "../bridges/primitives/xcm-bridge-hub-router" default-features = false optional = true -path = "../bridges/primitives/xcm-bridge-hub-router" [dependencies.bridge-hub-common] +path = "../cumulus/parachains/runtimes/bridge-hubs/common" default-features = false optional = true -path = "../cumulus/parachains/runtimes/bridge-hubs/common" [dependencies.bridge-runtime-common] 
+path = "../bridges/bin/runtime-common" default-features = false optional = true -path = "../bridges/bin/runtime-common" [dependencies.cumulus-pallet-aura-ext] +path = "../cumulus/pallets/aura-ext" default-features = false optional = true -path = "../cumulus/pallets/aura-ext" [dependencies.cumulus-pallet-dmp-queue] +path = "../cumulus/pallets/dmp-queue" default-features = false optional = true -path = "../cumulus/pallets/dmp-queue" [dependencies.cumulus-pallet-parachain-system] +path = "../cumulus/pallets/parachain-system" default-features = false optional = true -path = "../cumulus/pallets/parachain-system" [dependencies.cumulus-pallet-parachain-system-proc-macro] +path = "../cumulus/pallets/parachain-system/proc-macro" default-features = false optional = true -path = "../cumulus/pallets/parachain-system/proc-macro" [dependencies.cumulus-pallet-session-benchmarking] +path = "../cumulus/pallets/session-benchmarking" default-features = false optional = true -path = "../cumulus/pallets/session-benchmarking" [dependencies.cumulus-pallet-solo-to-para] +path = "../cumulus/pallets/solo-to-para" default-features = false optional = true -path = "../cumulus/pallets/solo-to-para" [dependencies.cumulus-pallet-xcm] +path = "../cumulus/pallets/xcm" default-features = false optional = true -path = "../cumulus/pallets/xcm" [dependencies.cumulus-pallet-xcmp-queue] +path = "../cumulus/pallets/xcmp-queue" default-features = false optional = true -path = "../cumulus/pallets/xcmp-queue" [dependencies.cumulus-ping] +path = "../cumulus/parachains/pallets/ping" default-features = false optional = true -path = "../cumulus/parachains/pallets/ping" [dependencies.cumulus-primitives-aura] +path = "../cumulus/primitives/aura" default-features = false optional = true -path = "../cumulus/primitives/aura" [dependencies.cumulus-primitives-core] +path = "../cumulus/primitives/core" default-features = false optional = true -path = "../cumulus/primitives/core" 
[dependencies.cumulus-primitives-parachain-inherent] +path = "../cumulus/primitives/parachain-inherent" default-features = false optional = true -path = "../cumulus/primitives/parachain-inherent" [dependencies.cumulus-primitives-proof-size-hostfunction] +path = "../cumulus/primitives/proof-size-hostfunction" default-features = false optional = true -path = "../cumulus/primitives/proof-size-hostfunction" [dependencies.cumulus-primitives-storage-weight-reclaim] +path = "../cumulus/primitives/storage-weight-reclaim" default-features = false optional = true -path = "../cumulus/primitives/storage-weight-reclaim" [dependencies.cumulus-primitives-timestamp] +path = "../cumulus/primitives/timestamp" default-features = false optional = true -path = "../cumulus/primitives/timestamp" [dependencies.cumulus-primitives-utility] +path = "../cumulus/primitives/utility" default-features = false optional = true -path = "../cumulus/primitives/utility" [dependencies.frame-benchmarking] +path = "../substrate/frame/benchmarking" default-features = false optional = true -path = "../substrate/frame/benchmarking" [dependencies.frame-benchmarking-pallet-pov] +path = "../substrate/frame/benchmarking/pov" default-features = false optional = true -path = "../substrate/frame/benchmarking/pov" [dependencies.frame-election-provider-solution-type] +path = "../substrate/frame/election-provider-support/solution-type" default-features = false optional = true -path = "../substrate/frame/election-provider-support/solution-type" [dependencies.frame-election-provider-support] +path = "../substrate/frame/election-provider-support" default-features = false optional = true -path = "../substrate/frame/election-provider-support" [dependencies.frame-executive] +path = "../substrate/frame/executive" default-features = false optional = true -path = "../substrate/frame/executive" [dependencies.frame-metadata-hash-extension] +path = "../substrate/frame/metadata-hash-extension" default-features = false optional = 
true -path = "../substrate/frame/metadata-hash-extension" [dependencies.frame-support] +path = "../substrate/frame/support" default-features = false optional = true -path = "../substrate/frame/support" [dependencies.frame-support-procedural] +path = "../substrate/frame/support/procedural" default-features = false optional = true -path = "../substrate/frame/support/procedural" [dependencies.frame-support-procedural-tools-derive] +path = "../substrate/frame/support/procedural/tools/derive" default-features = false optional = true -path = "../substrate/frame/support/procedural/tools/derive" [dependencies.frame-system] +path = "../substrate/frame/system" default-features = false optional = true -path = "../substrate/frame/system" [dependencies.frame-system-benchmarking] +path = "../substrate/frame/system/benchmarking" default-features = false optional = true -path = "../substrate/frame/system/benchmarking" [dependencies.frame-system-rpc-runtime-api] +path = "../substrate/frame/system/rpc/runtime-api" default-features = false optional = true -path = "../substrate/frame/system/rpc/runtime-api" [dependencies.frame-try-runtime] +path = "../substrate/frame/try-runtime" default-features = false optional = true -path = "../substrate/frame/try-runtime" [dependencies.pallet-alliance] +path = "../substrate/frame/alliance" default-features = false optional = true -path = "../substrate/frame/alliance" [dependencies.pallet-asset-conversion] +path = "../substrate/frame/asset-conversion" default-features = false optional = true -path = "../substrate/frame/asset-conversion" [dependencies.pallet-asset-conversion-ops] +path = "../substrate/frame/asset-conversion/ops" default-features = false optional = true -path = "../substrate/frame/asset-conversion/ops" [dependencies.pallet-asset-conversion-tx-payment] +path = "../substrate/frame/transaction-payment/asset-conversion-tx-payment" default-features = false optional = true -path = 
"../substrate/frame/transaction-payment/asset-conversion-tx-payment" [dependencies.pallet-asset-rate] +path = "../substrate/frame/asset-rate" default-features = false optional = true -path = "../substrate/frame/asset-rate" [dependencies.pallet-asset-tx-payment] +path = "../substrate/frame/transaction-payment/asset-tx-payment" default-features = false optional = true -path = "../substrate/frame/transaction-payment/asset-tx-payment" [dependencies.pallet-assets] +path = "../substrate/frame/assets" default-features = false optional = true -path = "../substrate/frame/assets" [dependencies.pallet-assets-freezer] +path = "../substrate/frame/assets-freezer" default-features = false optional = true -path = "../substrate/frame/assets-freezer" [dependencies.pallet-atomic-swap] +path = "../substrate/frame/atomic-swap" default-features = false optional = true -path = "../substrate/frame/atomic-swap" [dependencies.pallet-aura] +path = "../substrate/frame/aura" default-features = false optional = true -path = "../substrate/frame/aura" [dependencies.pallet-authority-discovery] +path = "../substrate/frame/authority-discovery" default-features = false optional = true -path = "../substrate/frame/authority-discovery" [dependencies.pallet-authorship] +path = "../substrate/frame/authorship" default-features = false optional = true -path = "../substrate/frame/authorship" [dependencies.pallet-babe] +path = "../substrate/frame/babe" default-features = false optional = true -path = "../substrate/frame/babe" [dependencies.pallet-bags-list] +path = "../substrate/frame/bags-list" default-features = false optional = true -path = "../substrate/frame/bags-list" [dependencies.pallet-balances] +path = "../substrate/frame/balances" default-features = false optional = true -path = "../substrate/frame/balances" [dependencies.pallet-beefy] +path = "../substrate/frame/beefy" default-features = false optional = true -path = "../substrate/frame/beefy" [dependencies.pallet-beefy-mmr] +path = 
"../substrate/frame/beefy-mmr" default-features = false optional = true -path = "../substrate/frame/beefy-mmr" [dependencies.pallet-bounties] +path = "../substrate/frame/bounties" default-features = false optional = true -path = "../substrate/frame/bounties" [dependencies.pallet-bridge-grandpa] +path = "../bridges/modules/grandpa" default-features = false optional = true -path = "../bridges/modules/grandpa" [dependencies.pallet-bridge-messages] +path = "../bridges/modules/messages" default-features = false optional = true -path = "../bridges/modules/messages" [dependencies.pallet-bridge-parachains] +path = "../bridges/modules/parachains" default-features = false optional = true -path = "../bridges/modules/parachains" [dependencies.pallet-bridge-relayers] +path = "../bridges/modules/relayers" default-features = false optional = true -path = "../bridges/modules/relayers" [dependencies.pallet-broker] +path = "../substrate/frame/broker" default-features = false optional = true -path = "../substrate/frame/broker" [dependencies.pallet-child-bounties] +path = "../substrate/frame/child-bounties" default-features = false optional = true -path = "../substrate/frame/child-bounties" [dependencies.pallet-collator-selection] +path = "../cumulus/pallets/collator-selection" default-features = false optional = true -path = "../cumulus/pallets/collator-selection" [dependencies.pallet-collective] +path = "../substrate/frame/collective" default-features = false optional = true -path = "../substrate/frame/collective" [dependencies.pallet-collective-content] +path = "../cumulus/parachains/pallets/collective-content" default-features = false optional = true -path = "../cumulus/parachains/pallets/collective-content" [dependencies.pallet-contracts] +path = "../substrate/frame/contracts" default-features = false optional = true -path = "../substrate/frame/contracts" [dependencies.pallet-contracts-proc-macro] +path = "../substrate/frame/contracts/proc-macro" default-features = false optional 
= true -path = "../substrate/frame/contracts/proc-macro" [dependencies.pallet-contracts-uapi] +path = "../substrate/frame/contracts/uapi" default-features = false optional = true -path = "../substrate/frame/contracts/uapi" [dependencies.pallet-conviction-voting] +path = "../substrate/frame/conviction-voting" default-features = false optional = true -path = "../substrate/frame/conviction-voting" [dependencies.pallet-core-fellowship] +path = "../substrate/frame/core-fellowship" default-features = false optional = true -path = "../substrate/frame/core-fellowship" [dependencies.pallet-delegated-staking] +path = "../substrate/frame/delegated-staking" default-features = false optional = true -path = "../substrate/frame/delegated-staking" [dependencies.pallet-democracy] +path = "../substrate/frame/democracy" default-features = false optional = true -path = "../substrate/frame/democracy" [dependencies.pallet-dev-mode] +path = "../substrate/frame/examples/dev-mode" default-features = false optional = true -path = "../substrate/frame/examples/dev-mode" [dependencies.pallet-election-provider-multi-phase] +path = "../substrate/frame/election-provider-multi-phase" default-features = false optional = true -path = "../substrate/frame/election-provider-multi-phase" [dependencies.pallet-election-provider-support-benchmarking] +path = "../substrate/frame/election-provider-support/benchmarking" default-features = false optional = true -path = "../substrate/frame/election-provider-support/benchmarking" [dependencies.pallet-elections-phragmen] +path = "../substrate/frame/elections-phragmen" default-features = false optional = true -path = "../substrate/frame/elections-phragmen" [dependencies.pallet-fast-unstake] +path = "../substrate/frame/fast-unstake" default-features = false optional = true -path = "../substrate/frame/fast-unstake" [dependencies.pallet-glutton] +path = "../substrate/frame/glutton" default-features = false optional = true -path = "../substrate/frame/glutton" 
[dependencies.pallet-grandpa] +path = "../substrate/frame/grandpa" default-features = false optional = true -path = "../substrate/frame/grandpa" [dependencies.pallet-identity] +path = "../substrate/frame/identity" default-features = false optional = true -path = "../substrate/frame/identity" [dependencies.pallet-im-online] +path = "../substrate/frame/im-online" default-features = false optional = true -path = "../substrate/frame/im-online" [dependencies.pallet-indices] +path = "../substrate/frame/indices" default-features = false optional = true -path = "../substrate/frame/indices" [dependencies.pallet-insecure-randomness-collective-flip] +path = "../substrate/frame/insecure-randomness-collective-flip" default-features = false optional = true -path = "../substrate/frame/insecure-randomness-collective-flip" [dependencies.pallet-lottery] +path = "../substrate/frame/lottery" default-features = false optional = true -path = "../substrate/frame/lottery" [dependencies.pallet-membership] +path = "../substrate/frame/membership" default-features = false optional = true -path = "../substrate/frame/membership" [dependencies.pallet-message-queue] +path = "../substrate/frame/message-queue" default-features = false optional = true -path = "../substrate/frame/message-queue" [dependencies.pallet-migrations] +path = "../substrate/frame/migrations" default-features = false optional = true -path = "../substrate/frame/migrations" [dependencies.pallet-mixnet] +path = "../substrate/frame/mixnet" default-features = false optional = true -path = "../substrate/frame/mixnet" [dependencies.pallet-mmr] +path = "../substrate/frame/merkle-mountain-range" default-features = false optional = true -path = "../substrate/frame/merkle-mountain-range" [dependencies.pallet-multisig] +path = "../substrate/frame/multisig" default-features = false optional = true -path = "../substrate/frame/multisig" [dependencies.pallet-nft-fractionalization] +path = "../substrate/frame/nft-fractionalization" 
default-features = false optional = true -path = "../substrate/frame/nft-fractionalization" [dependencies.pallet-nfts] +path = "../substrate/frame/nfts" default-features = false optional = true -path = "../substrate/frame/nfts" [dependencies.pallet-nfts-runtime-api] +path = "../substrate/frame/nfts/runtime-api" default-features = false optional = true -path = "../substrate/frame/nfts/runtime-api" [dependencies.pallet-nis] +path = "../substrate/frame/nis" default-features = false optional = true -path = "../substrate/frame/nis" [dependencies.pallet-node-authorization] +path = "../substrate/frame/node-authorization" default-features = false optional = true -path = "../substrate/frame/node-authorization" [dependencies.pallet-nomination-pools] +path = "../substrate/frame/nomination-pools" default-features = false optional = true -path = "../substrate/frame/nomination-pools" [dependencies.pallet-nomination-pools-benchmarking] +path = "../substrate/frame/nomination-pools/benchmarking" default-features = false optional = true -path = "../substrate/frame/nomination-pools/benchmarking" [dependencies.pallet-nomination-pools-runtime-api] +path = "../substrate/frame/nomination-pools/runtime-api" default-features = false optional = true -path = "../substrate/frame/nomination-pools/runtime-api" [dependencies.pallet-offences] +path = "../substrate/frame/offences" default-features = false optional = true -path = "../substrate/frame/offences" [dependencies.pallet-offences-benchmarking] +path = "../substrate/frame/offences/benchmarking" default-features = false optional = true -path = "../substrate/frame/offences/benchmarking" [dependencies.pallet-paged-list] +path = "../substrate/frame/paged-list" default-features = false optional = true -path = "../substrate/frame/paged-list" [dependencies.pallet-parameters] +path = "../substrate/frame/parameters" default-features = false optional = true -path = "../substrate/frame/parameters" [dependencies.pallet-preimage] +path = 
"../substrate/frame/preimage" default-features = false optional = true -path = "../substrate/frame/preimage" [dependencies.pallet-proxy] +path = "../substrate/frame/proxy" default-features = false optional = true -path = "../substrate/frame/proxy" [dependencies.pallet-ranked-collective] +path = "../substrate/frame/ranked-collective" default-features = false optional = true -path = "../substrate/frame/ranked-collective" [dependencies.pallet-recovery] +path = "../substrate/frame/recovery" default-features = false optional = true -path = "../substrate/frame/recovery" [dependencies.pallet-referenda] +path = "../substrate/frame/referenda" default-features = false optional = true -path = "../substrate/frame/referenda" [dependencies.pallet-remark] +path = "../substrate/frame/remark" default-features = false optional = true -path = "../substrate/frame/remark" [dependencies.pallet-revive] +path = "../substrate/frame/revive" default-features = false optional = true -path = "../substrate/frame/revive" -[dependencies.pallet-revive-proc-macro] +[dependencies.pallet-revive-fixtures] +path = "../substrate/frame/revive/fixtures" default-features = false optional = true + +[dependencies.pallet-revive-proc-macro] path = "../substrate/frame/revive/proc-macro" +default-features = false +optional = true [dependencies.pallet-revive-uapi] +path = "../substrate/frame/revive/uapi" default-features = false optional = true -path = "../substrate/frame/revive/uapi" [dependencies.pallet-root-offences] +path = "../substrate/frame/root-offences" default-features = false optional = true -path = "../substrate/frame/root-offences" [dependencies.pallet-root-testing] +path = "../substrate/frame/root-testing" default-features = false optional = true -path = "../substrate/frame/root-testing" [dependencies.pallet-safe-mode] +path = "../substrate/frame/safe-mode" default-features = false optional = true -path = "../substrate/frame/safe-mode" [dependencies.pallet-salary] +path = "../substrate/frame/salary" 
default-features = false optional = true -path = "../substrate/frame/salary" [dependencies.pallet-scheduler] +path = "../substrate/frame/scheduler" default-features = false optional = true -path = "../substrate/frame/scheduler" [dependencies.pallet-scored-pool] +path = "../substrate/frame/scored-pool" default-features = false optional = true -path = "../substrate/frame/scored-pool" [dependencies.pallet-session] +path = "../substrate/frame/session" default-features = false optional = true -path = "../substrate/frame/session" [dependencies.pallet-session-benchmarking] +path = "../substrate/frame/session/benchmarking" default-features = false optional = true -path = "../substrate/frame/session/benchmarking" [dependencies.pallet-skip-feeless-payment] +path = "../substrate/frame/transaction-payment/skip-feeless-payment" default-features = false optional = true -path = "../substrate/frame/transaction-payment/skip-feeless-payment" [dependencies.pallet-society] +path = "../substrate/frame/society" default-features = false optional = true -path = "../substrate/frame/society" [dependencies.pallet-staking] +path = "../substrate/frame/staking" default-features = false optional = true -path = "../substrate/frame/staking" [dependencies.pallet-staking-reward-curve] +path = "../substrate/frame/staking/reward-curve" default-features = false optional = true -path = "../substrate/frame/staking/reward-curve" [dependencies.pallet-staking-reward-fn] +path = "../substrate/frame/staking/reward-fn" default-features = false optional = true -path = "../substrate/frame/staking/reward-fn" [dependencies.pallet-staking-runtime-api] +path = "../substrate/frame/staking/runtime-api" default-features = false optional = true -path = "../substrate/frame/staking/runtime-api" [dependencies.pallet-state-trie-migration] +path = "../substrate/frame/state-trie-migration" default-features = false optional = true -path = "../substrate/frame/state-trie-migration" [dependencies.pallet-statement] +path = 
"../substrate/frame/statement" default-features = false optional = true -path = "../substrate/frame/statement" [dependencies.pallet-sudo] +path = "../substrate/frame/sudo" default-features = false optional = true -path = "../substrate/frame/sudo" [dependencies.pallet-timestamp] +path = "../substrate/frame/timestamp" default-features = false optional = true -path = "../substrate/frame/timestamp" [dependencies.pallet-tips] +path = "../substrate/frame/tips" default-features = false optional = true -path = "../substrate/frame/tips" [dependencies.pallet-transaction-payment] +path = "../substrate/frame/transaction-payment" default-features = false optional = true -path = "../substrate/frame/transaction-payment" [dependencies.pallet-transaction-payment-rpc-runtime-api] +path = "../substrate/frame/transaction-payment/rpc/runtime-api" default-features = false optional = true -path = "../substrate/frame/transaction-payment/rpc/runtime-api" [dependencies.pallet-transaction-storage] +path = "../substrate/frame/transaction-storage" default-features = false optional = true -path = "../substrate/frame/transaction-storage" [dependencies.pallet-treasury] +path = "../substrate/frame/treasury" default-features = false optional = true -path = "../substrate/frame/treasury" [dependencies.pallet-tx-pause] +path = "../substrate/frame/tx-pause" default-features = false optional = true -path = "../substrate/frame/tx-pause" [dependencies.pallet-uniques] +path = "../substrate/frame/uniques" default-features = false optional = true -path = "../substrate/frame/uniques" [dependencies.pallet-utility] +path = "../substrate/frame/utility" default-features = false optional = true -path = "../substrate/frame/utility" [dependencies.pallet-verify-signature] +path = "../substrate/frame/verify-signature" default-features = false optional = true -path = "../substrate/frame/verify-signature" [dependencies.pallet-vesting] +path = "../substrate/frame/vesting" default-features = false optional = true -path = 
"../substrate/frame/vesting" [dependencies.pallet-whitelist] +path = "../substrate/frame/whitelist" default-features = false optional = true -path = "../substrate/frame/whitelist" [dependencies.pallet-xcm] +path = "../polkadot/xcm/pallet-xcm" default-features = false optional = true -path = "../polkadot/xcm/pallet-xcm" [dependencies.pallet-xcm-benchmarks] +path = "../polkadot/xcm/pallet-xcm-benchmarks" default-features = false optional = true -path = "../polkadot/xcm/pallet-xcm-benchmarks" [dependencies.pallet-xcm-bridge-hub] +path = "../bridges/modules/xcm-bridge-hub" default-features = false optional = true -path = "../bridges/modules/xcm-bridge-hub" [dependencies.pallet-xcm-bridge-hub-router] +path = "../bridges/modules/xcm-bridge-hub-router" default-features = false optional = true -path = "../bridges/modules/xcm-bridge-hub-router" [dependencies.parachains-common] +path = "../cumulus/parachains/common" default-features = false optional = true -path = "../cumulus/parachains/common" [dependencies.polkadot-core-primitives] +path = "../polkadot/core-primitives" default-features = false optional = true -path = "../polkadot/core-primitives" [dependencies.polkadot-parachain-primitives] +path = "../polkadot/parachain" default-features = false optional = true -path = "../polkadot/parachain" [dependencies.polkadot-primitives] +path = "../polkadot/primitives" default-features = false optional = true -path = "../polkadot/primitives" [dependencies.polkadot-runtime-common] +path = "../polkadot/runtime/common" default-features = false optional = true -path = "../polkadot/runtime/common" [dependencies.polkadot-runtime-metrics] +path = "../polkadot/runtime/metrics" default-features = false optional = true -path = "../polkadot/runtime/metrics" [dependencies.polkadot-runtime-parachains] +path = "../polkadot/runtime/parachains" default-features = false optional = true -path = "../polkadot/runtime/parachains" [dependencies.polkadot-sdk-frame] +path = "../substrate/frame" 
default-features = false optional = true -path = "../substrate/frame" [dependencies.sc-chain-spec-derive] +path = "../substrate/client/chain-spec/derive" default-features = false optional = true -path = "../substrate/client/chain-spec/derive" [dependencies.sc-tracing-proc-macro] +path = "../substrate/client/tracing/proc-macro" default-features = false optional = true -path = "../substrate/client/tracing/proc-macro" [dependencies.slot-range-helper] +path = "../polkadot/runtime/common/slot_range_helper" default-features = false optional = true -path = "../polkadot/runtime/common/slot_range_helper" [dependencies.snowbridge-beacon-primitives] +path = "../bridges/snowbridge/primitives/beacon" default-features = false optional = true -path = "../bridges/snowbridge/primitives/beacon" [dependencies.snowbridge-core] +path = "../bridges/snowbridge/primitives/core" default-features = false optional = true -path = "../bridges/snowbridge/primitives/core" [dependencies.snowbridge-ethereum] +path = "../bridges/snowbridge/primitives/ethereum" default-features = false optional = true -path = "../bridges/snowbridge/primitives/ethereum" [dependencies.snowbridge-outbound-queue-merkle-tree] +path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree" default-features = false optional = true -path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree" [dependencies.snowbridge-outbound-queue-runtime-api] +path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api" default-features = false optional = true -path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api" [dependencies.snowbridge-pallet-ethereum-client] +path = "../bridges/snowbridge/pallets/ethereum-client" default-features = false optional = true -path = "../bridges/snowbridge/pallets/ethereum-client" [dependencies.snowbridge-pallet-ethereum-client-fixtures] +path = "../bridges/snowbridge/pallets/ethereum-client/fixtures" default-features = false optional = true -path = 
"../bridges/snowbridge/pallets/ethereum-client/fixtures" [dependencies.snowbridge-pallet-inbound-queue] +path = "../bridges/snowbridge/pallets/inbound-queue" default-features = false optional = true -path = "../bridges/snowbridge/pallets/inbound-queue" [dependencies.snowbridge-pallet-inbound-queue-fixtures] +path = "../bridges/snowbridge/pallets/inbound-queue/fixtures" default-features = false optional = true -path = "../bridges/snowbridge/pallets/inbound-queue/fixtures" [dependencies.snowbridge-pallet-outbound-queue] +path = "../bridges/snowbridge/pallets/outbound-queue" default-features = false optional = true -path = "../bridges/snowbridge/pallets/outbound-queue" [dependencies.snowbridge-pallet-system] +path = "../bridges/snowbridge/pallets/system" default-features = false optional = true -path = "../bridges/snowbridge/pallets/system" [dependencies.snowbridge-router-primitives] +path = "../bridges/snowbridge/primitives/router" default-features = false optional = true -path = "../bridges/snowbridge/primitives/router" [dependencies.snowbridge-runtime-common] +path = "../bridges/snowbridge/runtime/runtime-common" default-features = false optional = true -path = "../bridges/snowbridge/runtime/runtime-common" [dependencies.snowbridge-system-runtime-api] +path = "../bridges/snowbridge/pallets/system/runtime-api" default-features = false optional = true -path = "../bridges/snowbridge/pallets/system/runtime-api" [dependencies.sp-api] +path = "../substrate/primitives/api" default-features = false optional = true -path = "../substrate/primitives/api" [dependencies.sp-api-proc-macro] +path = "../substrate/primitives/api/proc-macro" default-features = false optional = true -path = "../substrate/primitives/api/proc-macro" [dependencies.sp-application-crypto] +path = "../substrate/primitives/application-crypto" default-features = false optional = true -path = "../substrate/primitives/application-crypto" [dependencies.sp-arithmetic] +path = "../substrate/primitives/arithmetic" 
default-features = false optional = true -path = "../substrate/primitives/arithmetic" [dependencies.sp-authority-discovery] +path = "../substrate/primitives/authority-discovery" default-features = false optional = true -path = "../substrate/primitives/authority-discovery" [dependencies.sp-block-builder] +path = "../substrate/primitives/block-builder" default-features = false optional = true -path = "../substrate/primitives/block-builder" [dependencies.sp-consensus-aura] +path = "../substrate/primitives/consensus/aura" default-features = false optional = true -path = "../substrate/primitives/consensus/aura" [dependencies.sp-consensus-babe] +path = "../substrate/primitives/consensus/babe" default-features = false optional = true -path = "../substrate/primitives/consensus/babe" [dependencies.sp-consensus-beefy] +path = "../substrate/primitives/consensus/beefy" default-features = false optional = true -path = "../substrate/primitives/consensus/beefy" [dependencies.sp-consensus-grandpa] +path = "../substrate/primitives/consensus/grandpa" default-features = false optional = true -path = "../substrate/primitives/consensus/grandpa" [dependencies.sp-consensus-pow] +path = "../substrate/primitives/consensus/pow" default-features = false optional = true -path = "../substrate/primitives/consensus/pow" [dependencies.sp-consensus-slots] +path = "../substrate/primitives/consensus/slots" default-features = false optional = true -path = "../substrate/primitives/consensus/slots" [dependencies.sp-core] +path = "../substrate/primitives/core" default-features = false optional = true -path = "../substrate/primitives/core" [dependencies.sp-crypto-ec-utils] +path = "../substrate/primitives/crypto/ec-utils" default-features = false optional = true -path = "../substrate/primitives/crypto/ec-utils" [dependencies.sp-crypto-hashing] +path = "../substrate/primitives/crypto/hashing" default-features = false optional = true -path = "../substrate/primitives/crypto/hashing" 
[dependencies.sp-crypto-hashing-proc-macro] +path = "../substrate/primitives/crypto/hashing/proc-macro" default-features = false optional = true -path = "../substrate/primitives/crypto/hashing/proc-macro" [dependencies.sp-debug-derive] +path = "../substrate/primitives/debug-derive" default-features = false optional = true -path = "../substrate/primitives/debug-derive" [dependencies.sp-externalities] +path = "../substrate/primitives/externalities" default-features = false optional = true -path = "../substrate/primitives/externalities" [dependencies.sp-genesis-builder] +path = "../substrate/primitives/genesis-builder" default-features = false optional = true -path = "../substrate/primitives/genesis-builder" [dependencies.sp-inherents] +path = "../substrate/primitives/inherents" default-features = false optional = true -path = "../substrate/primitives/inherents" [dependencies.sp-io] +path = "../substrate/primitives/io" default-features = false optional = true -path = "../substrate/primitives/io" [dependencies.sp-keyring] +path = "../substrate/primitives/keyring" default-features = false optional = true -path = "../substrate/primitives/keyring" [dependencies.sp-keystore] +path = "../substrate/primitives/keystore" default-features = false optional = true -path = "../substrate/primitives/keystore" [dependencies.sp-metadata-ir] +path = "../substrate/primitives/metadata-ir" default-features = false optional = true -path = "../substrate/primitives/metadata-ir" [dependencies.sp-mixnet] +path = "../substrate/primitives/mixnet" default-features = false optional = true -path = "../substrate/primitives/mixnet" [dependencies.sp-mmr-primitives] +path = "../substrate/primitives/merkle-mountain-range" default-features = false optional = true -path = "../substrate/primitives/merkle-mountain-range" [dependencies.sp-npos-elections] +path = "../substrate/primitives/npos-elections" default-features = false optional = true -path = "../substrate/primitives/npos-elections" 
[dependencies.sp-offchain] +path = "../substrate/primitives/offchain" default-features = false optional = true -path = "../substrate/primitives/offchain" [dependencies.sp-runtime] +path = "../substrate/primitives/runtime" default-features = false optional = true -path = "../substrate/primitives/runtime" [dependencies.sp-runtime-interface] +path = "../substrate/primitives/runtime-interface" default-features = false optional = true -path = "../substrate/primitives/runtime-interface" [dependencies.sp-runtime-interface-proc-macro] +path = "../substrate/primitives/runtime-interface/proc-macro" default-features = false optional = true -path = "../substrate/primitives/runtime-interface/proc-macro" [dependencies.sp-session] +path = "../substrate/primitives/session" default-features = false optional = true -path = "../substrate/primitives/session" [dependencies.sp-staking] +path = "../substrate/primitives/staking" default-features = false optional = true -path = "../substrate/primitives/staking" [dependencies.sp-state-machine] +path = "../substrate/primitives/state-machine" default-features = false optional = true -path = "../substrate/primitives/state-machine" [dependencies.sp-statement-store] +path = "../substrate/primitives/statement-store" default-features = false optional = true -path = "../substrate/primitives/statement-store" [dependencies.sp-std] +path = "../substrate/primitives/std" default-features = false optional = true -path = "../substrate/primitives/std" [dependencies.sp-storage] +path = "../substrate/primitives/storage" default-features = false optional = true -path = "../substrate/primitives/storage" [dependencies.sp-timestamp] +path = "../substrate/primitives/timestamp" default-features = false optional = true -path = "../substrate/primitives/timestamp" [dependencies.sp-tracing] +path = "../substrate/primitives/tracing" default-features = false optional = true -path = "../substrate/primitives/tracing" [dependencies.sp-transaction-pool] +path = 
"../substrate/primitives/transaction-pool" default-features = false optional = true -path = "../substrate/primitives/transaction-pool" [dependencies.sp-transaction-storage-proof] +path = "../substrate/primitives/transaction-storage-proof" default-features = false optional = true -path = "../substrate/primitives/transaction-storage-proof" [dependencies.sp-trie] +path = "../substrate/primitives/trie" default-features = false optional = true -path = "../substrate/primitives/trie" [dependencies.sp-version] +path = "../substrate/primitives/version" default-features = false optional = true -path = "../substrate/primitives/version" [dependencies.sp-version-proc-macro] +path = "../substrate/primitives/version/proc-macro" default-features = false optional = true -path = "../substrate/primitives/version/proc-macro" [dependencies.sp-wasm-interface] +path = "../substrate/primitives/wasm-interface" default-features = false optional = true -path = "../substrate/primitives/wasm-interface" [dependencies.sp-weights] +path = "../substrate/primitives/weights" default-features = false optional = true -path = "../substrate/primitives/weights" [dependencies.staging-parachain-info] +path = "../cumulus/parachains/pallets/parachain-info" default-features = false optional = true -path = "../cumulus/parachains/pallets/parachain-info" [dependencies.staging-xcm] +path = "../polkadot/xcm" default-features = false optional = true -path = "../polkadot/xcm" [dependencies.staging-xcm-builder] +path = "../polkadot/xcm/xcm-builder" default-features = false optional = true -path = "../polkadot/xcm/xcm-builder" [dependencies.staging-xcm-executor] +path = "../polkadot/xcm/xcm-executor" default-features = false optional = true -path = "../polkadot/xcm/xcm-executor" [dependencies.substrate-bip39] +path = "../substrate/utils/substrate-bip39" default-features = false optional = true -path = "../substrate/utils/substrate-bip39" [dependencies.testnet-parachains-constants] +path = 
"../cumulus/parachains/runtimes/constants" default-features = false optional = true -path = "../cumulus/parachains/runtimes/constants" [dependencies.tracing-gum-proc-macro] +path = "../polkadot/node/gum/proc-macro" default-features = false optional = true -path = "../polkadot/node/gum/proc-macro" [dependencies.xcm-procedural] +path = "../polkadot/xcm/procedural" default-features = false optional = true -path = "../polkadot/xcm/procedural" [dependencies.xcm-runtime-apis] +path = "../polkadot/xcm/xcm-runtime-apis" default-features = false optional = true -path = "../polkadot/xcm/xcm-runtime-apis" [dependencies.asset-test-utils] +path = "../cumulus/parachains/runtimes/assets/test-utils" default-features = false optional = true -path = "../cumulus/parachains/runtimes/assets/test-utils" [dependencies.bridge-hub-test-utils] +path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils" default-features = false optional = true -path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils" [dependencies.cumulus-client-cli] +path = "../cumulus/client/cli" default-features = false optional = true -path = "../cumulus/client/cli" [dependencies.cumulus-client-collator] +path = "../cumulus/client/collator" default-features = false optional = true -path = "../cumulus/client/collator" [dependencies.cumulus-client-consensus-aura] +path = "../cumulus/client/consensus/aura" default-features = false optional = true -path = "../cumulus/client/consensus/aura" [dependencies.cumulus-client-consensus-common] +path = "../cumulus/client/consensus/common" default-features = false optional = true -path = "../cumulus/client/consensus/common" [dependencies.cumulus-client-consensus-proposer] +path = "../cumulus/client/consensus/proposer" default-features = false optional = true -path = "../cumulus/client/consensus/proposer" [dependencies.cumulus-client-consensus-relay-chain] +path = "../cumulus/client/consensus/relay-chain" default-features = false optional = true -path = 
"../cumulus/client/consensus/relay-chain" [dependencies.cumulus-client-network] +path = "../cumulus/client/network" default-features = false optional = true -path = "../cumulus/client/network" [dependencies.cumulus-client-parachain-inherent] +path = "../cumulus/client/parachain-inherent" default-features = false optional = true -path = "../cumulus/client/parachain-inherent" [dependencies.cumulus-client-pov-recovery] +path = "../cumulus/client/pov-recovery" default-features = false optional = true -path = "../cumulus/client/pov-recovery" [dependencies.cumulus-client-service] +path = "../cumulus/client/service" default-features = false optional = true -path = "../cumulus/client/service" [dependencies.cumulus-relay-chain-inprocess-interface] +path = "../cumulus/client/relay-chain-inprocess-interface" default-features = false optional = true -path = "../cumulus/client/relay-chain-inprocess-interface" [dependencies.cumulus-relay-chain-interface] +path = "../cumulus/client/relay-chain-interface" default-features = false optional = true -path = "../cumulus/client/relay-chain-interface" [dependencies.cumulus-relay-chain-minimal-node] +path = "../cumulus/client/relay-chain-minimal-node" default-features = false optional = true -path = "../cumulus/client/relay-chain-minimal-node" [dependencies.cumulus-relay-chain-rpc-interface] +path = "../cumulus/client/relay-chain-rpc-interface" default-features = false optional = true -path = "../cumulus/client/relay-chain-rpc-interface" [dependencies.cumulus-test-relay-sproof-builder] +path = "../cumulus/test/relay-sproof-builder" default-features = false optional = true -path = "../cumulus/test/relay-sproof-builder" [dependencies.emulated-integration-tests-common] +path = "../cumulus/parachains/integration-tests/emulated/common" default-features = false optional = true -path = "../cumulus/parachains/integration-tests/emulated/common" [dependencies.fork-tree] +path = "../substrate/utils/fork-tree" default-features = false optional = true 
-path = "../substrate/utils/fork-tree" [dependencies.frame-benchmarking-cli] +path = "../substrate/utils/frame/benchmarking-cli" default-features = false optional = true -path = "../substrate/utils/frame/benchmarking-cli" [dependencies.frame-remote-externalities] +path = "../substrate/utils/frame/remote-externalities" default-features = false optional = true -path = "../substrate/utils/frame/remote-externalities" [dependencies.frame-support-procedural-tools] +path = "../substrate/frame/support/procedural/tools" default-features = false optional = true -path = "../substrate/frame/support/procedural/tools" [dependencies.generate-bags] +path = "../substrate/utils/frame/generate-bags" default-features = false optional = true -path = "../substrate/utils/frame/generate-bags" [dependencies.mmr-gadget] +path = "../substrate/client/merkle-mountain-range" default-features = false optional = true -path = "../substrate/client/merkle-mountain-range" [dependencies.mmr-rpc] +path = "../substrate/client/merkle-mountain-range/rpc" default-features = false optional = true -path = "../substrate/client/merkle-mountain-range/rpc" [dependencies.pallet-contracts-mock-network] +path = "../substrate/frame/contracts/mock-network" default-features = false optional = true -path = "../substrate/frame/contracts/mock-network" [dependencies.pallet-revive-eth-rpc] +path = "../substrate/frame/revive/rpc" default-features = false optional = true -path = "../substrate/frame/revive/rpc" [dependencies.pallet-revive-mock-network] +path = "../substrate/frame/revive/mock-network" default-features = false optional = true -path = "../substrate/frame/revive/mock-network" [dependencies.pallet-transaction-payment-rpc] +path = "../substrate/frame/transaction-payment/rpc" default-features = false optional = true -path = "../substrate/frame/transaction-payment/rpc" [dependencies.parachains-runtimes-test-utils] +path = "../cumulus/parachains/runtimes/test-utils" default-features = false optional = true -path = 
"../cumulus/parachains/runtimes/test-utils" [dependencies.polkadot-approval-distribution] +path = "../polkadot/node/network/approval-distribution" default-features = false optional = true -path = "../polkadot/node/network/approval-distribution" [dependencies.polkadot-availability-bitfield-distribution] +path = "../polkadot/node/network/bitfield-distribution" default-features = false optional = true -path = "../polkadot/node/network/bitfield-distribution" [dependencies.polkadot-availability-distribution] +path = "../polkadot/node/network/availability-distribution" default-features = false optional = true -path = "../polkadot/node/network/availability-distribution" [dependencies.polkadot-availability-recovery] +path = "../polkadot/node/network/availability-recovery" default-features = false optional = true -path = "../polkadot/node/network/availability-recovery" [dependencies.polkadot-cli] +path = "../polkadot/cli" default-features = false optional = true -path = "../polkadot/cli" [dependencies.polkadot-collator-protocol] +path = "../polkadot/node/network/collator-protocol" default-features = false optional = true -path = "../polkadot/node/network/collator-protocol" [dependencies.polkadot-dispute-distribution] +path = "../polkadot/node/network/dispute-distribution" default-features = false optional = true -path = "../polkadot/node/network/dispute-distribution" [dependencies.polkadot-erasure-coding] +path = "../polkadot/erasure-coding" default-features = false optional = true -path = "../polkadot/erasure-coding" [dependencies.polkadot-gossip-support] +path = "../polkadot/node/network/gossip-support" default-features = false optional = true -path = "../polkadot/node/network/gossip-support" [dependencies.polkadot-network-bridge] +path = "../polkadot/node/network/bridge" default-features = false optional = true -path = "../polkadot/node/network/bridge" [dependencies.polkadot-node-collation-generation] +path = "../polkadot/node/collation-generation" default-features = 
false optional = true -path = "../polkadot/node/collation-generation" [dependencies.polkadot-node-core-approval-voting] +path = "../polkadot/node/core/approval-voting" default-features = false optional = true -path = "../polkadot/node/core/approval-voting" [dependencies.polkadot-node-core-approval-voting-parallel] +path = "../polkadot/node/core/approval-voting-parallel" default-features = false optional = true -path = "../polkadot/node/core/approval-voting-parallel" [dependencies.polkadot-node-core-av-store] +path = "../polkadot/node/core/av-store" default-features = false optional = true -path = "../polkadot/node/core/av-store" [dependencies.polkadot-node-core-backing] +path = "../polkadot/node/core/backing" default-features = false optional = true -path = "../polkadot/node/core/backing" [dependencies.polkadot-node-core-bitfield-signing] +path = "../polkadot/node/core/bitfield-signing" default-features = false optional = true -path = "../polkadot/node/core/bitfield-signing" [dependencies.polkadot-node-core-candidate-validation] +path = "../polkadot/node/core/candidate-validation" default-features = false optional = true -path = "../polkadot/node/core/candidate-validation" [dependencies.polkadot-node-core-chain-api] +path = "../polkadot/node/core/chain-api" default-features = false optional = true -path = "../polkadot/node/core/chain-api" [dependencies.polkadot-node-core-chain-selection] +path = "../polkadot/node/core/chain-selection" default-features = false optional = true -path = "../polkadot/node/core/chain-selection" [dependencies.polkadot-node-core-dispute-coordinator] +path = "../polkadot/node/core/dispute-coordinator" default-features = false optional = true -path = "../polkadot/node/core/dispute-coordinator" [dependencies.polkadot-node-core-parachains-inherent] +path = "../polkadot/node/core/parachains-inherent" default-features = false optional = true -path = "../polkadot/node/core/parachains-inherent" 
[dependencies.polkadot-node-core-prospective-parachains] +path = "../polkadot/node/core/prospective-parachains" default-features = false optional = true -path = "../polkadot/node/core/prospective-parachains" [dependencies.polkadot-node-core-provisioner] +path = "../polkadot/node/core/provisioner" default-features = false optional = true -path = "../polkadot/node/core/provisioner" [dependencies.polkadot-node-core-pvf] +path = "../polkadot/node/core/pvf" default-features = false optional = true -path = "../polkadot/node/core/pvf" [dependencies.polkadot-node-core-pvf-checker] +path = "../polkadot/node/core/pvf-checker" default-features = false optional = true -path = "../polkadot/node/core/pvf-checker" [dependencies.polkadot-node-core-pvf-common] +path = "../polkadot/node/core/pvf/common" default-features = false optional = true -path = "../polkadot/node/core/pvf/common" [dependencies.polkadot-node-core-pvf-execute-worker] +path = "../polkadot/node/core/pvf/execute-worker" default-features = false optional = true -path = "../polkadot/node/core/pvf/execute-worker" [dependencies.polkadot-node-core-pvf-prepare-worker] +path = "../polkadot/node/core/pvf/prepare-worker" default-features = false optional = true -path = "../polkadot/node/core/pvf/prepare-worker" [dependencies.polkadot-node-core-runtime-api] +path = "../polkadot/node/core/runtime-api" default-features = false optional = true -path = "../polkadot/node/core/runtime-api" [dependencies.polkadot-node-metrics] +path = "../polkadot/node/metrics" default-features = false optional = true -path = "../polkadot/node/metrics" [dependencies.polkadot-node-network-protocol] +path = "../polkadot/node/network/protocol" default-features = false optional = true -path = "../polkadot/node/network/protocol" [dependencies.polkadot-node-primitives] +path = "../polkadot/node/primitives" default-features = false optional = true -path = "../polkadot/node/primitives" [dependencies.polkadot-node-subsystem] +path = 
"../polkadot/node/subsystem" default-features = false optional = true -path = "../polkadot/node/subsystem" [dependencies.polkadot-node-subsystem-types] +path = "../polkadot/node/subsystem-types" default-features = false optional = true -path = "../polkadot/node/subsystem-types" [dependencies.polkadot-node-subsystem-util] +path = "../polkadot/node/subsystem-util" default-features = false optional = true -path = "../polkadot/node/subsystem-util" [dependencies.polkadot-omni-node-lib] +path = "../cumulus/polkadot-omni-node/lib" default-features = false optional = true -path = "../cumulus/polkadot-omni-node/lib" [dependencies.polkadot-overseer] +path = "../polkadot/node/overseer" default-features = false optional = true -path = "../polkadot/node/overseer" [dependencies.polkadot-rpc] +path = "../polkadot/rpc" default-features = false optional = true -path = "../polkadot/rpc" [dependencies.polkadot-service] +path = "../polkadot/node/service" default-features = false optional = true -path = "../polkadot/node/service" [dependencies.polkadot-statement-distribution] +path = "../polkadot/node/network/statement-distribution" default-features = false optional = true -path = "../polkadot/node/network/statement-distribution" [dependencies.polkadot-statement-table] +path = "../polkadot/statement-table" default-features = false optional = true -path = "../polkadot/statement-table" [dependencies.sc-allocator] +path = "../substrate/client/allocator" default-features = false optional = true -path = "../substrate/client/allocator" [dependencies.sc-authority-discovery] +path = "../substrate/client/authority-discovery" default-features = false optional = true -path = "../substrate/client/authority-discovery" [dependencies.sc-basic-authorship] +path = "../substrate/client/basic-authorship" default-features = false optional = true -path = "../substrate/client/basic-authorship" [dependencies.sc-block-builder] +path = "../substrate/client/block-builder" default-features = false optional = 
true -path = "../substrate/client/block-builder" [dependencies.sc-chain-spec] +path = "../substrate/client/chain-spec" default-features = false optional = true -path = "../substrate/client/chain-spec" [dependencies.sc-cli] +path = "../substrate/client/cli" default-features = false optional = true -path = "../substrate/client/cli" [dependencies.sc-client-api] +path = "../substrate/client/api" default-features = false optional = true -path = "../substrate/client/api" [dependencies.sc-client-db] +path = "../substrate/client/db" default-features = false optional = true -path = "../substrate/client/db" [dependencies.sc-consensus] +path = "../substrate/client/consensus/common" default-features = false optional = true -path = "../substrate/client/consensus/common" [dependencies.sc-consensus-aura] +path = "../substrate/client/consensus/aura" default-features = false optional = true -path = "../substrate/client/consensus/aura" [dependencies.sc-consensus-babe] +path = "../substrate/client/consensus/babe" default-features = false optional = true -path = "../substrate/client/consensus/babe" [dependencies.sc-consensus-babe-rpc] +path = "../substrate/client/consensus/babe/rpc" default-features = false optional = true -path = "../substrate/client/consensus/babe/rpc" [dependencies.sc-consensus-beefy] +path = "../substrate/client/consensus/beefy" default-features = false optional = true -path = "../substrate/client/consensus/beefy" [dependencies.sc-consensus-beefy-rpc] +path = "../substrate/client/consensus/beefy/rpc" default-features = false optional = true -path = "../substrate/client/consensus/beefy/rpc" [dependencies.sc-consensus-epochs] +path = "../substrate/client/consensus/epochs" default-features = false optional = true -path = "../substrate/client/consensus/epochs" [dependencies.sc-consensus-grandpa] +path = "../substrate/client/consensus/grandpa" default-features = false optional = true -path = "../substrate/client/consensus/grandpa" 
[dependencies.sc-consensus-grandpa-rpc] +path = "../substrate/client/consensus/grandpa/rpc" default-features = false optional = true -path = "../substrate/client/consensus/grandpa/rpc" [dependencies.sc-consensus-manual-seal] +path = "../substrate/client/consensus/manual-seal" default-features = false optional = true -path = "../substrate/client/consensus/manual-seal" [dependencies.sc-consensus-pow] +path = "../substrate/client/consensus/pow" default-features = false optional = true -path = "../substrate/client/consensus/pow" [dependencies.sc-consensus-slots] +path = "../substrate/client/consensus/slots" default-features = false optional = true -path = "../substrate/client/consensus/slots" [dependencies.sc-executor] +path = "../substrate/client/executor" default-features = false optional = true -path = "../substrate/client/executor" [dependencies.sc-executor-common] +path = "../substrate/client/executor/common" default-features = false optional = true -path = "../substrate/client/executor/common" [dependencies.sc-executor-polkavm] +path = "../substrate/client/executor/polkavm" default-features = false optional = true -path = "../substrate/client/executor/polkavm" [dependencies.sc-executor-wasmtime] +path = "../substrate/client/executor/wasmtime" default-features = false optional = true -path = "../substrate/client/executor/wasmtime" [dependencies.sc-informant] +path = "../substrate/client/informant" default-features = false optional = true -path = "../substrate/client/informant" [dependencies.sc-keystore] +path = "../substrate/client/keystore" default-features = false optional = true -path = "../substrate/client/keystore" [dependencies.sc-mixnet] +path = "../substrate/client/mixnet" default-features = false optional = true -path = "../substrate/client/mixnet" [dependencies.sc-network] +path = "../substrate/client/network" default-features = false optional = true -path = "../substrate/client/network" [dependencies.sc-network-common] +path = 
"../substrate/client/network/common" default-features = false optional = true -path = "../substrate/client/network/common" [dependencies.sc-network-gossip] +path = "../substrate/client/network-gossip" default-features = false optional = true -path = "../substrate/client/network-gossip" [dependencies.sc-network-light] +path = "../substrate/client/network/light" default-features = false optional = true -path = "../substrate/client/network/light" [dependencies.sc-network-statement] +path = "../substrate/client/network/statement" default-features = false optional = true -path = "../substrate/client/network/statement" [dependencies.sc-network-sync] +path = "../substrate/client/network/sync" default-features = false optional = true -path = "../substrate/client/network/sync" [dependencies.sc-network-transactions] +path = "../substrate/client/network/transactions" default-features = false optional = true -path = "../substrate/client/network/transactions" [dependencies.sc-network-types] +path = "../substrate/client/network/types" default-features = false optional = true -path = "../substrate/client/network/types" [dependencies.sc-offchain] +path = "../substrate/client/offchain" default-features = false optional = true -path = "../substrate/client/offchain" [dependencies.sc-proposer-metrics] +path = "../substrate/client/proposer-metrics" default-features = false optional = true -path = "../substrate/client/proposer-metrics" [dependencies.sc-rpc] +path = "../substrate/client/rpc" default-features = false optional = true -path = "../substrate/client/rpc" [dependencies.sc-rpc-api] +path = "../substrate/client/rpc-api" default-features = false optional = true -path = "../substrate/client/rpc-api" [dependencies.sc-rpc-server] +path = "../substrate/client/rpc-servers" default-features = false optional = true -path = "../substrate/client/rpc-servers" [dependencies.sc-rpc-spec-v2] -default-features = false -optional = true path = "../substrate/client/rpc-spec-v2" - 
-[dependencies.sc-runtime-utilities] default-features = false optional = true -path = "../substrate/client/runtime-utilities" [dependencies.sc-service] +path = "../substrate/client/service" default-features = false optional = true -path = "../substrate/client/service" [dependencies.sc-state-db] +path = "../substrate/client/state-db" default-features = false optional = true -path = "../substrate/client/state-db" [dependencies.sc-statement-store] +path = "../substrate/client/statement-store" default-features = false optional = true -path = "../substrate/client/statement-store" [dependencies.sc-storage-monitor] +path = "../substrate/client/storage-monitor" default-features = false optional = true -path = "../substrate/client/storage-monitor" [dependencies.sc-sync-state-rpc] +path = "../substrate/client/sync-state-rpc" default-features = false optional = true -path = "../substrate/client/sync-state-rpc" [dependencies.sc-sysinfo] +path = "../substrate/client/sysinfo" default-features = false optional = true -path = "../substrate/client/sysinfo" [dependencies.sc-telemetry] +path = "../substrate/client/telemetry" default-features = false optional = true -path = "../substrate/client/telemetry" [dependencies.sc-tracing] +path = "../substrate/client/tracing" default-features = false optional = true -path = "../substrate/client/tracing" [dependencies.sc-transaction-pool] +path = "../substrate/client/transaction-pool" default-features = false optional = true -path = "../substrate/client/transaction-pool" [dependencies.sc-transaction-pool-api] +path = "../substrate/client/transaction-pool/api" default-features = false optional = true -path = "../substrate/client/transaction-pool/api" [dependencies.sc-utils] +path = "../substrate/client/utils" default-features = false optional = true -path = "../substrate/client/utils" [dependencies.snowbridge-runtime-test-common] +path = "../bridges/snowbridge/runtime/test-common" default-features = false optional = true -path = 
"../bridges/snowbridge/runtime/test-common" [dependencies.sp-blockchain] +path = "../substrate/primitives/blockchain" default-features = false optional = true -path = "../substrate/primitives/blockchain" [dependencies.sp-consensus] +path = "../substrate/primitives/consensus/common" default-features = false optional = true -path = "../substrate/primitives/consensus/common" [dependencies.sp-core-hashing] +path = "../substrate/deprecated/hashing" default-features = false optional = true -path = "../substrate/deprecated/hashing" [dependencies.sp-core-hashing-proc-macro] +path = "../substrate/deprecated/hashing/proc-macro" default-features = false optional = true -path = "../substrate/deprecated/hashing/proc-macro" [dependencies.sp-database] +path = "../substrate/primitives/database" default-features = false optional = true -path = "../substrate/primitives/database" [dependencies.sp-maybe-compressed-blob] +path = "../substrate/primitives/maybe-compressed-blob" default-features = false optional = true -path = "../substrate/primitives/maybe-compressed-blob" [dependencies.sp-panic-handler] +path = "../substrate/primitives/panic-handler" default-features = false optional = true -path = "../substrate/primitives/panic-handler" [dependencies.sp-rpc] +path = "../substrate/primitives/rpc" default-features = false optional = true -path = "../substrate/primitives/rpc" [dependencies.staging-chain-spec-builder] +path = "../substrate/bin/utils/chain-spec-builder" default-features = false optional = true -path = "../substrate/bin/utils/chain-spec-builder" [dependencies.staging-node-inspect] +path = "../substrate/bin/node/inspect" default-features = false optional = true -path = "../substrate/bin/node/inspect" [dependencies.staging-tracking-allocator] +path = "../polkadot/node/tracking-allocator" default-features = false optional = true -path = "../polkadot/node/tracking-allocator" [dependencies.subkey] +path = "../substrate/bin/utils/subkey" default-features = false optional = true 
-path = "../substrate/bin/utils/subkey" [dependencies.substrate-build-script-utils] +path = "../substrate/utils/build-script-utils" default-features = false optional = true -path = "../substrate/utils/build-script-utils" [dependencies.substrate-frame-rpc-support] +path = "../substrate/utils/frame/rpc/support" default-features = false optional = true -path = "../substrate/utils/frame/rpc/support" [dependencies.substrate-frame-rpc-system] +path = "../substrate/utils/frame/rpc/system" default-features = false optional = true -path = "../substrate/utils/frame/rpc/system" [dependencies.substrate-prometheus-endpoint] +path = "../substrate/utils/prometheus" default-features = false optional = true -path = "../substrate/utils/prometheus" [dependencies.substrate-rpc-client] +path = "../substrate/utils/frame/rpc/client" default-features = false optional = true -path = "../substrate/utils/frame/rpc/client" [dependencies.substrate-state-trie-migration-rpc] +path = "../substrate/utils/frame/rpc/state-trie-migration-rpc" default-features = false optional = true -path = "../substrate/utils/frame/rpc/state-trie-migration-rpc" [dependencies.substrate-wasm-builder] +path = "../substrate/utils/wasm-builder" default-features = false optional = true -path = "../substrate/utils/wasm-builder" [dependencies.tracing-gum] +path = "../polkadot/node/gum" default-features = false optional = true -path = "../polkadot/node/gum" [dependencies.xcm-emulator] +path = "../cumulus/xcm/xcm-emulator" default-features = false optional = true -path = "../cumulus/xcm/xcm-emulator" [dependencies.xcm-simulator] +path = "../polkadot/xcm/xcm-simulator" default-features = false optional = true -path = "../polkadot/xcm/xcm-simulator" [package.metadata.docs.rs] features = ["node", "runtime-full"] diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 7b3c869588f0..2216864fad0f 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -584,6 +584,10 @@ pub use pallet_revive; #[cfg(feature = 
"pallet-revive-eth-rpc")] pub use pallet_revive_eth_rpc; +/// Fixtures for testing and benchmarking. +#[cfg(feature = "pallet-revive-fixtures")] +pub use pallet_revive_fixtures; + /// A mock network for testing pallet-revive. #[cfg(feature = "pallet-revive-mock-network")] pub use pallet_revive_mock_network; @@ -1119,10 +1123,6 @@ pub use sc_rpc_server; #[cfg(feature = "sc-rpc-spec-v2")] pub use sc_rpc_spec_v2; -/// Substrate client utilities for frame runtime functions calls. -#[cfg(feature = "sc-runtime-utilities")] -pub use sc_runtime_utilities; - /// Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. /// Manages communication between them. #[cfg(feature = "sc-service")]