From f2b44f92b319d42e456b64cf25cc12f44114b8e4 Mon Sep 17 00:00:00 2001
From: Fredrik Simonsson
Date: Tue, 12 Mar 2024 12:31:27 +0900
Subject: [PATCH] Remove sponsorship migration; auto-format and fix clippy warnings

---
 node/src/service.rs                       | 648 +++++++++---------
 pallets/sponsorship/src/lib.rs            |  22 +-
 .../eden/src/weights/pallet_collective.rs |   2 +-
 3 files changed, 326 insertions(+), 346 deletions(-)

diff --git a/node/src/service.rs b/node/src/service.rs
index 35e0065bf17..29596b11e6b 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -23,7 +23,7 @@ use std::{sync::Arc, time::Duration};
 use cumulus_client_cli::CollatorOptions;
 // Local Runtime Types
-use runtime_eden::{api, RuntimeApi};
+use runtime_eden::RuntimeApi;

 use polkadot_primitives::{Block, Hash};

@@ -32,8 +32,8 @@ use cumulus_client_collator::service::CollatorService;
 use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
 use cumulus_client_consensus_proposer::Proposer;
 use cumulus_client_service::{
-    build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams,
-    CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams,
+    build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams,
+    CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams,
 };
 use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId};
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
@@ -52,15 +52,15 @@ use substrate_prometheus_endpoint::Registry;

 #[cfg(not(feature = "runtime-benchmarks"))]
 type HostFunctions = (
-    sp_io::SubstrateHostFunctions,
-    cumulus_client_service::storage_proof_size::HostFunctions,
+    sp_io::SubstrateHostFunctions,
+    cumulus_client_service::storage_proof_size::HostFunctions,
 );

 #[cfg(feature = "runtime-benchmarks")]
 type HostFunctions = (
-    sp_io::SubstrateHostFunctions,
-    cumulus_client_service::storage_proof_size::HostFunctions,
-    frame_benchmarking::benchmarking::HostFunctions,
+    sp_io::SubstrateHostFunctions,
+    cumulus_client_service::storage_proof_size::HostFunctions,
+    frame_benchmarking::benchmarking::HostFunctions,
 );

 type ParachainClient = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;

@@ -74,85 +74,85 @@ type ParachainBlockImport = TParachainBlockImport<Block, Arc<ParachainClient>, ParachainBackend>;

 /// Use this function if you don't actually need the full service, but just the builder in order to
 /// be able to perform chain operations.
 pub fn new_partial(
-    config: &Configuration,
+    config: &Configuration,
 ) -> Result<
-    PartialComponents<
-        ParachainClient,
-        ParachainBackend,
-        (),
-        sc_consensus::DefaultImportQueue<Block>,
-        sc_transaction_pool::FullPool<Block, ParachainClient>,
-        (ParachainBlockImport, Option<Telemetry>, Option<TelemetryWorkerHandle>),
-    >,
-    sc_service::Error,
+    PartialComponents<
+        ParachainClient,
+        ParachainBackend,
+        (),
+        sc_consensus::DefaultImportQueue<Block>,
+        sc_transaction_pool::FullPool<Block, ParachainClient>,
+        (ParachainBlockImport, Option<Telemetry>, Option<TelemetryWorkerHandle>),
+    >,
+    sc_service::Error,
 > {
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
-        .with_execution_method(config.wasm_method)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .build();
-
-    let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, _>(
-        config,
-        telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-        executor,
-    )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager.spawn_handle().spawn("telemetry", None, worker.run());
-        telemetry
-    });
-
-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
-
-    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-    let import_queue = build_import_queue(
-        client.clone(),
-        block_import.clone(),
-        config,
-        telemetry.as_ref().map(|telemetry| telemetry.handle()),
-        &task_manager,
-    )?;
-
-    Ok(PartialComponents {
-        backend,
-        client,
-        import_queue,
-        keystore_container,
-        task_manager,
-        transaction_pool,
-        select_chain: (),
-        other: (block_import, telemetry, telemetry_worker_handle),
-    })
+    let telemetry = config
+        .telemetry_endpoints
+        .clone()
+        .filter(|x| !x.is_empty())
+        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+            let worker = TelemetryWorker::new(16)?;
+            let telemetry = worker.handle().new_telemetry(endpoints);
+            Ok((worker, telemetry))
+        })
+        .transpose()?;
+
+    let heap_pages = config
+        .default_heap_pages
+        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+            extra_pages: h as _,
+        });
+
+    let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
+        .with_execution_method(config.wasm_method)
+        .with_max_runtime_instances(config.max_runtime_instances)
+        .with_runtime_cache_size(config.runtime_cache_size)
+        .with_onchain_heap_alloc_strategy(heap_pages)
+        .with_offchain_heap_alloc_strategy(heap_pages)
+        .build();
+
+    let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, _>(
+        config,
+        telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+        executor,
+    )?;
+    let client = Arc::new(client);
+
+    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+    let telemetry = telemetry.map(|(worker, telemetry)| {
+        task_manager.spawn_handle().spawn("telemetry", None, worker.run());
+        telemetry
+    });
+
+    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+        config.transaction_pool.clone(),
+        config.role.is_authority().into(),
+        config.prometheus_registry(),
+        task_manager.spawn_essential_handle(),
+        client.clone(),
+    );
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = build_import_queue(
+        client.clone(),
+        block_import.clone(),
+        config,
+        telemetry.as_ref().map(|telemetry| telemetry.handle()),
+        &task_manager,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain: (),
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
 }

 /// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
@@ -160,266 +160,266 @@ pub fn new_partial(
 /// This is the actual implementation that is abstract over the executor and the runtime api.
 #[sc_tracing::logging::prefix_logs_with("Parachain")]
 async fn start_node_impl(
-    parachain_config: Configuration,
-    polkadot_config: Configuration,
-    collator_options: CollatorOptions,
-    para_id: ParaId,
-    hwbench: Option<sc_sysinfo::HwBench>,
+    parachain_config: Configuration,
+    polkadot_config: Configuration,
+    collator_options: CollatorOptions,
+    para_id: ParaId,
+    hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
-    let parachain_config = prepare_node_config(parachain_config);
-
-    let params = new_partial(&parachain_config)?;
-    let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
-    let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
-
-    let client = params.client.clone();
-    let backend = params.backend.clone();
-    let mut task_manager = params.task_manager;
-
-    let (relay_chain_interface, collator_key) = build_relay_chain_interface(
-        polkadot_config,
-        &parachain_config,
-        telemetry_worker_handle,
-        &mut task_manager,
-        collator_options.clone(),
-        hwbench.clone(),
-    )
-    .await
-    .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
-
-    let validator = parachain_config.role.is_authority();
-    let prometheus_registry = parachain_config.prometheus_registry().cloned();
-    let transaction_pool = params.transaction_pool.clone();
-    let import_queue_service = params.import_queue.service();
-    let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
-        build_network(BuildNetworkParams {
-            parachain_config: &parachain_config,
-            net_config,
-            client: client.clone(),
-            transaction_pool: transaction_pool.clone(),
-            para_id,
-            spawn_handle: task_manager.spawn_handle(),
-            relay_chain_interface: relay_chain_interface.clone(),
-            import_queue: params.import_queue,
-            sybil_resistance_level: CollatorSybilResistance::Resistant, // because of Aura
-        })
-        .await?;
-
-    let rpc_builder = {
-        let client = client.clone();
-        let transaction_pool = transaction_pool.clone();
-
-        Box::new(move |deny_unsafe, _| {
-            let deps = crate::rpc::FullDeps {
-                client: client.clone(),
-                pool: transaction_pool.clone(),
-                deny_unsafe,
-            };
-
-            crate::rpc::create_full(deps).map_err(Into::into)
-        })
-    };
-
-    sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-        rpc_builder,
-        client: client.clone(),
-        transaction_pool: transaction_pool.clone(),
-        task_manager: &mut task_manager,
-        config: parachain_config,
-        keystore: params.keystore_container.keystore(),
-        backend,
-        network: network.clone(),
-        sync_service: sync_service.clone(),
-        system_rpc_tx,
-        tx_handler_controller,
-        telemetry: telemetry.as_mut(),
-    })?;
-
-    if let Some(hwbench) = hwbench {
-        sc_sysinfo::print_hwbench(&hwbench);
-        if validator {
-            warn_if_slow_hardware(&hwbench);
-        }
-
-        if let Some(ref mut telemetry) = telemetry {
-            let telemetry_handle = telemetry.handle();
-            task_manager.spawn_handle().spawn(
-                "telemetry_hwbench",
-                None,
-                sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
-            );
-        }
-    }
-
-    let announce_block = {
-        let sync_service = sync_service.clone();
-        Arc::new(move |hash, data| sync_service.announce_block(hash, data))
-    };
-
-    let relay_chain_slot_duration = Duration::from_secs(6);
-
-    let overseer_handle = relay_chain_interface
-        .overseer_handle()
-        .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
-
-    start_relay_chain_tasks(StartRelayChainTasksParams {
-        client: client.clone(),
-        announce_block: announce_block.clone(),
-        para_id,
-        relay_chain_interface: relay_chain_interface.clone(),
-        task_manager: &mut task_manager,
-        da_recovery_profile: if validator {
-            DARecoveryProfile::Collator
-        } else {
-            DARecoveryProfile::FullNode
-        },
-        import_queue: import_queue_service,
-        relay_chain_slot_duration,
-        recovery_handle: Box::new(overseer_handle.clone()),
-        sync_service: sync_service.clone(),
-    })?;
-
-    if validator {
-        start_consensus(
-            client.clone(),
-            block_import,
-            prometheus_registry.as_ref(),
-            telemetry.as_ref().map(|t| t.handle()),
-            &task_manager,
-            relay_chain_interface.clone(),
-            transaction_pool,
-            sync_service.clone(),
-            params.keystore_container.keystore(),
-            relay_chain_slot_duration,
-            para_id,
-            collator_key.expect("Command line arguments do not allow this. qed"),
-            overseer_handle,
-            announce_block,
-        )?;
-    }
-
-    start_network.start_network();
-
-    Ok((task_manager, client))
+    let parachain_config = prepare_node_config(parachain_config);
+
+    let params = new_partial(&parachain_config)?;
+    let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+    let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
+
+    let client = params.client.clone();
+    let backend = params.backend.clone();
+    let mut task_manager = params.task_manager;
+
+    let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+        polkadot_config,
+        &parachain_config,
+        telemetry_worker_handle,
+        &mut task_manager,
+        collator_options.clone(),
+        hwbench.clone(),
+    )
+    .await
+    .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+    let validator = parachain_config.role.is_authority();
+    let prometheus_registry = parachain_config.prometheus_registry().cloned();
+    let transaction_pool = params.transaction_pool.clone();
+    let import_queue_service = params.import_queue.service();
+    let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+        build_network(BuildNetworkParams {
+            parachain_config: &parachain_config,
+            net_config,
+            client: client.clone(),
+            transaction_pool: transaction_pool.clone(),
+            para_id,
+            spawn_handle: task_manager.spawn_handle(),
+            relay_chain_interface: relay_chain_interface.clone(),
+            import_queue: params.import_queue,
+            sybil_resistance_level: CollatorSybilResistance::Resistant, // because of Aura
+        })
+        .await?;
+
+    let rpc_builder = {
+        let client = client.clone();
+        let transaction_pool = transaction_pool.clone();
+
+        Box::new(move |deny_unsafe, _| {
+            let deps = crate::rpc::FullDeps {
+                client: client.clone(),
+                pool: transaction_pool.clone(),
+                deny_unsafe,
+            };
+
+            crate::rpc::create_full(deps).map_err(Into::into)
+        })
+    };
+
+    sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+        rpc_builder,
+        client: client.clone(),
+        transaction_pool: transaction_pool.clone(),
+        task_manager: &mut task_manager,
+        config: parachain_config,
+        keystore: params.keystore_container.keystore(),
+        backend,
+        network: network.clone(),
+        sync_service: sync_service.clone(),
+        system_rpc_tx,
+        tx_handler_controller,
+        telemetry: telemetry.as_mut(),
+    })?;
+
+    if let Some(hwbench) = hwbench {
+        sc_sysinfo::print_hwbench(&hwbench);
+        if validator {
+            warn_if_slow_hardware(&hwbench);
+        }
+
+        if let Some(ref mut telemetry) = telemetry {
+            let telemetry_handle = telemetry.handle();
+            task_manager.spawn_handle().spawn(
+                "telemetry_hwbench",
+                None,
+                sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+            );
+        }
+    }
+
+    let announce_block = {
+        let sync_service = sync_service.clone();
+        Arc::new(move |hash, data| sync_service.announce_block(hash, data))
+    };
+
+    let relay_chain_slot_duration = Duration::from_secs(6);
+
+    let overseer_handle = relay_chain_interface
+        .overseer_handle()
+        .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+
+    start_relay_chain_tasks(StartRelayChainTasksParams {
+        client: client.clone(),
+        announce_block: announce_block.clone(),
+        para_id,
+        relay_chain_interface: relay_chain_interface.clone(),
+        task_manager: &mut task_manager,
+        da_recovery_profile: if validator {
+            DARecoveryProfile::Collator
+        } else {
+            DARecoveryProfile::FullNode
+        },
+        import_queue: import_queue_service,
+        relay_chain_slot_duration,
+        recovery_handle: Box::new(overseer_handle.clone()),
+        sync_service: sync_service.clone(),
+    })?;
+
+    if validator {
+        start_consensus(
+            client.clone(),
+            block_import,
+            prometheus_registry.as_ref(),
+            telemetry.as_ref().map(|t| t.handle()),
+            &task_manager,
+            relay_chain_interface.clone(),
+            transaction_pool,
+            sync_service.clone(),
+            params.keystore_container.keystore(),
+            relay_chain_slot_duration,
+            para_id,
+            collator_key.expect("Command line arguments do not allow this. qed"),
+            overseer_handle,
+            announce_block,
+        )?;
+    }
+
+    start_network.start_network();
+
+    Ok((task_manager, client))
 }

 /// Build the import queue for the parachain runtime.
 fn build_import_queue(
-    client: Arc<ParachainClient>,
-    block_import: ParachainBlockImport,
-    config: &Configuration,
-    telemetry: Option<TelemetryHandle>,
-    task_manager: &TaskManager,
+    client: Arc<ParachainClient>,
+    block_import: ParachainBlockImport,
+    config: &Configuration,
+    telemetry: Option<TelemetryHandle>,
+    task_manager: &TaskManager,
 ) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
-    let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
-    Ok(
-        cumulus_client_consensus_aura::equivocation_import_queue::fully_verifying_import_queue::<
-            sp_consensus_aura::sr25519::AuthorityPair,
-            _,
-            _,
-            _,
-            _,
-        >(
-            client,
-            block_import,
-            move |_, _| async move {
-                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-                Ok(timestamp)
-            },
-            slot_duration,
-            &task_manager.spawn_essential_handle(),
-            config.prometheus_registry(),
-            telemetry,
-        ),
-    )
+    let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+    Ok(
+        cumulus_client_consensus_aura::equivocation_import_queue::fully_verifying_import_queue::<
+            sp_consensus_aura::sr25519::AuthorityPair,
+            _,
+            _,
+            _,
+            _,
+        >(
+            client,
+            block_import,
+            move |_, _| async move {
+                let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+                Ok(timestamp)
+            },
+            slot_duration,
+            &task_manager.spawn_essential_handle(),
+            config.prometheus_registry(),
+            telemetry,
+        ),
+    )
 }

 #[allow(clippy::too_many_arguments)]
 fn start_consensus(
-    client: Arc<ParachainClient>,
-    block_import: ParachainBlockImport,
-    prometheus_registry: Option<&Registry>,
-    telemetry: Option<TelemetryHandle>,
-    task_manager: &TaskManager,
-    relay_chain_interface: Arc<dyn RelayChainInterface>,
-    transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient>>,
-    sync_oracle: Arc<SyncingService<Block>>,
-    keystore: KeystorePtr,
-    relay_chain_slot_duration: Duration,
-    para_id: ParaId,
-    collator_key: CollatorPair,
-    overseer_handle: OverseerHandle,
-    announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+    client: Arc<ParachainClient>,
+    block_import: ParachainBlockImport,
+    prometheus_registry: Option<&Registry>,
+    telemetry: Option<TelemetryHandle>,
+    task_manager: &TaskManager,
+    relay_chain_interface: Arc<dyn RelayChainInterface>,
+    transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient>>,
+    sync_oracle: Arc<SyncingService<Block>>,
+    keystore: KeystorePtr,
+    relay_chain_slot_duration: Duration,
+    para_id: ParaId,
+    collator_key: CollatorPair,
+    overseer_handle: OverseerHandle,
+    announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
 ) -> Result<(), sc_service::Error> {
-    use cumulus_client_consensus_aura::collators::basic::{self as basic_aura, Params as BasicAuraParams};
-
-    // NOTE: because we use Aura here explicitly, we can use `CollatorSybilResistance::Resistant`
-    // when starting the network.
-
-    let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
-    let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
-        task_manager.spawn_handle(),
-        client.clone(),
-        transaction_pool,
-        prometheus_registry,
-        telemetry.clone(),
-    );
-
-    let proposer = Proposer::new(proposer_factory);
-
-    let collator_service = CollatorService::new(
-        client.clone(),
-        Arc::new(task_manager.spawn_handle()),
-        announce_block,
-        client.clone(),
-    );
-
-    let params = BasicAuraParams {
-        create_inherent_data_providers: move |_, ()| async move { Ok(()) },
-        block_import,
-        para_client: client,
-        relay_client: relay_chain_interface,
-        sync_oracle,
-        keystore,
-        collator_key,
-        para_id,
-        overseer_handle,
-        slot_duration,
-        relay_chain_slot_duration,
-        proposer,
-        collator_service,
-        // Very limited proposal time.
-        authoring_duration: Duration::from_millis(500),
-        collation_request_receiver: None,
-    };
-
-    let fut = basic_aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _>(params);
-    task_manager.spawn_essential_handle().spawn("aura", None, fut);
-
-    Ok(())
+    use cumulus_client_consensus_aura::collators::basic::{self as basic_aura, Params as BasicAuraParams};
+
+    // NOTE: because we use Aura here explicitly, we can use `CollatorSybilResistance::Resistant`
+    // when starting the network.
+
+    let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+    let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+        task_manager.spawn_handle(),
+        client.clone(),
+        transaction_pool,
+        prometheus_registry,
+        telemetry.clone(),
+    );
+
+    let proposer = Proposer::new(proposer_factory);
+
+    let collator_service = CollatorService::new(
+        client.clone(),
+        Arc::new(task_manager.spawn_handle()),
+        announce_block,
+        client.clone(),
+    );
+
+    let params = BasicAuraParams {
+        create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+        block_import,
+        para_client: client,
+        relay_client: relay_chain_interface,
+        sync_oracle,
+        keystore,
+        collator_key,
+        para_id,
+        overseer_handle,
+        slot_duration,
+        relay_chain_slot_duration,
+        proposer,
+        collator_service,
+        // Very limited proposal time.
+        authoring_duration: Duration::from_millis(500),
+        collation_request_receiver: None,
+    };
+
+    let fut = basic_aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _>(params);
+    task_manager.spawn_essential_handle().spawn("aura", None, fut);
+
+    Ok(())
 }

 /// Start a parachain node.
 pub async fn start_parachain_node(
-    parachain_config: Configuration,
-    polkadot_config: Configuration,
-    collator_options: CollatorOptions,
-    para_id: ParaId,
-    hwbench: Option<sc_sysinfo::HwBench>,
+    parachain_config: Configuration,
+    polkadot_config: Configuration,
+    collator_options: CollatorOptions,
+    para_id: ParaId,
+    hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
-    start_node_impl(parachain_config, polkadot_config, collator_options, para_id, hwbench).await
+    start_node_impl(parachain_config, polkadot_config, collator_options, para_id, hwbench).await
 }

 /// Checks that the hardware meets the requirements and prints a warning otherwise.
 fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) {
-    // Polkadot para-chains should generally use these requirements to ensure that the relay-chain
-    // will not take longer than expected to import its blocks.
-    if let Err(e) = frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) {
-        log::warn!(
-            "⚠️ The hardware does not meet the minimal requirements for role 'Authority' find out more at:\n\
-            https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware {e:?}"
-        );
-    }
+    // Polkadot para-chains should generally use these requirements to ensure that the relay-chain
+    // will not take longer than expected to import its blocks.
+    if let Err(e) = frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) {
+        log::warn!(
+            "⚠️ The hardware does not meet the minimal requirements for role 'Authority' find out more at:\n\
+            https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware {e:?}"
+        );
+    }
 }
diff --git a/pallets/sponsorship/src/lib.rs b/pallets/sponsorship/src/lib.rs
index c1f19da3db9..3db150b8b63 100644
--- a/pallets/sponsorship/src/lib.rs
+++ b/pallets/sponsorship/src/lib.rs
@@ -25,8 +25,7 @@ use frame_support::{
     dispatch::{DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo},
    traits::{
        Currency,
-        ExistenceRequirement::{AllowDeath, KeepAlive},
-        GetStorageVersion, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency,
+        ExistenceRequirement::{AllowDeath, KeepAlive}, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency,
    },
 };
 use pallet_transaction_payment::OnChargeTransaction;
@@ -178,14 +177,6 @@ pub mod pallet {
    #[pallet::storage]
    pub(super) type UserRegistrationCount<T: Config> = StorageMap<_, Blake2_128Concat, T::AccountId, u32, ValueQuery>;

-    /// Stores the migration cursor for the Pot storage. This item will be removed in a subsequent upgrade.
-    #[pallet::storage]
-    pub(super) type PotMigrationCursor<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
-
-    /// Stores the migration cursor for the Pot storage. This item will be removed in a subsequent upgrade.
-    #[pallet::storage]
-    pub(super) type UserMigrationCursor<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
-
    #[pallet::storage]
    pub(super) type PotUserMigrationPerBlock<T: Config> = StorageValue<_, (u32, u32), OptionQuery>;

@@ -278,7 +269,6 @@ pub mod pallet {
            reserve_quota: BalanceOf<T>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            ensure!(!Pot::<T>::contains_key(pot), Error::<T>::InUse);

            T::Currency::reserve(&who, T::PotDeposit::get())?;
@@ -312,7 +302,6 @@ pub mod pallet {
        #[pallet::weight(T::WeightInfo::remove_pot())]
        pub fn remove_pot(origin: OriginFor<T>, pot: T::PotId) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            Pot::<T>::try_mutate(pot, |maybe_pot_details| -> DispatchResult {
                let pot_details = maybe_pot_details.as_mut().ok_or(Error::<T>::InUse)?;
                ensure!(pot_details.sponsor == who, Error::<T>::NoPermission);
@@ -341,7 +330,6 @@ pub mod pallet {
            common_reserve_quota: BalanceOf<T>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            let pot_details = Pot::<T>::get(pot).ok_or(Error::<T>::PotNotExist)?;
            ensure!(pot_details.sponsor == who, Error::<T>::NoPermission);
            for user in users.clone() {
@@ -388,7 +376,6 @@ pub mod pallet {
        #[pallet::weight(T::WeightInfo::remove_users(users.len() as u32))]
        pub fn remove_users(origin: OriginFor<T>, pot: T::PotId, users: Vec<T::AccountId>) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            let mut pot_details = Pot::<T>::get(pot).ok_or(Error::<T>::PotNotExist)?;
            ensure!(pot_details.sponsor == who, Error::<T>::NoPermission);
            for user in users.clone() {
@@ -451,7 +438,6 @@ pub mod pallet {
            call: Box<<T as Config>::RuntimeCall>,
        ) -> DispatchResultWithPostInfo {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);

            let preps = Self::pre_sponsor_for(who.clone(), pot)?;

@@ -483,7 +469,6 @@ pub mod pallet {
            new_reserve_quota: BalanceOf<T>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            let mut pot_details = Pot::<T>::get(pot).ok_or(Error::<T>::PotNotExist)?;
            ensure!(pot_details.sponsor == who, Error::<T>::NoPermission);

@@ -518,7 +503,6 @@ pub mod pallet {
            users: Vec<T::AccountId>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
-            ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress);
            let pot_details = Pot::<T>::get(pot).ok_or(Error::<T>::PotNotExist)?;
            ensure!(pot_details.sponsor == who, Error::<T>::NoPermission);

@@ -676,10 +660,6 @@ impl<T: Config> Pallet<T> {
        Self::deposit_event(Event::Sponsored { paid, repaid });
        Ok(())
    }
-
-    fn migration_in_progress() -> bool {
-        Self::on_chain_storage_version() < Self::current_storage_version()
-    }
 }

 /// Require the sponsor to pay for their transactors.
diff --git a/runtimes/eden/src/weights/pallet_collective.rs b/runtimes/eden/src/weights/pallet_collective.rs
index 61084ccbeac..8134eeeb58c 100644
--- a/runtimes/eden/src/weights/pallet_collective.rs
+++ b/runtimes/eden/src/weights/pallet_collective.rs
@@ -209,7 +209,7 @@ impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> {
    /// The range of component `b` is `[2, 1024]`.
    /// The range of component `m` is `[4, 50]`.
    /// The range of component `p` is `[1, 100]`.
-    fn close_approved(b: u32, m: u32, p: u32) -> Weight {
+    fn close_approved(b: u32, _m: u32, p: u32) -> Weight {
        // Minimum execution time: 45_570 nanoseconds.
        Weight::from_parts(48_653_945_u64, 0)
            // Standard Error: 200
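
A note on the heap-pages selection that the reformatted `new_partial` keeps intact: `Option::map_or` either pins the wasm heap to a static number of extra pages (when the operator passed an explicit value) or falls back to the executor default. A minimal, runnable sketch of that decision; the `Dynamic` variant is a stand-in for `DEFAULT_HEAP_ALLOC_STRATEGY`, which this patch does not show:

// Sketch of the heap-pages selection in new_partial. `Dynamic` stands in
// for sc_executor's DEFAULT_HEAP_ALLOC_STRATEGY; only the map_or shape is
// taken from the patch.
#[derive(Debug, PartialEq)]
enum HeapAllocStrategy {
    Dynamic,
    Static { extra_pages: u32 },
}

fn heap_strategy(default_heap_pages: Option<u64>) -> HeapAllocStrategy {
    default_heap_pages.map_or(HeapAllocStrategy::Dynamic, |h| HeapAllocStrategy::Static {
        extra_pages: h as u32,
    })
}

fn main() {
    assert_eq!(heap_strategy(None), HeapAllocStrategy::Dynamic);
    assert_eq!(heap_strategy(Some(2048)), HeapAllocStrategy::Static { extra_pages: 2048 });
}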
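The import queue built in `build_import_queue` hands its verifier only a timestamp inherent; the Aura slot is derived from that timestamp rather than supplied separately. The standard Aura rule is integer division of the unix-epoch milliseconds by the slot duration; a runnable sketch of that rule (general Aura behaviour, not code from this patch):

// Sketch: deriving an Aura slot from the timestamp inherent, as the
// equivocation-checking import queue's verifier does internally.
// slot = timestamp_ms / slot_duration_ms.
fn slot_from_timestamp(timestamp_ms: u64, slot_duration_ms: u64) -> u64 {
    timestamp_ms / slot_duration_ms
}

fn main() {
    // With 12s slots, two timestamps 12s apart land in consecutive slots.
    let d = 12_000;
    let s1 = slot_from_timestamp(1_710_216_687_000, d);
    let s2 = slot_from_timestamp(1_710_216_699_000, d);
    assert_eq!(s2, s1 + 1);
}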
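On the pallet side, each extrinsic loses its `ensure!(!Self::migration_in_progress(), Error::<T>::MigrationInProgress)` guard along with the helper itself. The removed helper simply compared storage versions: while the cursor-based migration was still draining storage, the on-chain version lagged the code's version and user-facing calls were rejected. A self-contained sketch of that gate, with a simplified stand-in for frame_support's `StorageVersion`:

// Sketch of the gate this patch deletes. While the lazy migration ran, the
// on-chain storage version lagged the code's current version and every
// user-facing call bailed out with MigrationInProgress.
#[derive(PartialEq, PartialOrd, Clone, Copy)]
struct StorageVersion(u16);

fn migration_in_progress(on_chain: StorageVersion, current: StorageVersion) -> bool {
    on_chain < current
}

fn main() {
    assert!(migration_in_progress(StorageVersion(1), StorageVersion(2)));
    // Once the cursors were drained and the version bumped, the guard opened.
    assert!(!migration_in_progress(StorageVersion(2), StorageVersion(2)));
}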
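Finally, the one-character weights change: `close_approved` must keep the three-parameter signature demanded by the `pallet_collective::WeightInfo` trait, but this generated implementation never reads the member count `m`, so the `_m` rename silences rustc's unused-variable warning without changing behaviour. A reduced illustration (the trait and the per-component slopes are simplified; only the base weight comes from the patch):

// Reduced illustration of the _m rename. The trait pins the signature; the
// underscore tells rustc the parameter is intentionally unused.
trait WeightInfo {
    fn close_approved(b: u32, m: u32, p: u32) -> u64;
}

struct Eden;

impl WeightInfo for Eden {
    // `_m`: the member count does not influence this benchmarked weight.
    fn close_approved(b: u32, _m: u32, p: u32) -> u64 {
        // Base weight from the patch; the per-component slopes are made up
        // for illustration (the real file derives them from benchmarks).
        48_653_945 + u64::from(b) * 2_000 + u64::from(p) * 400_000
    }
}

fn main() {
    println!("{}", Eden::close_approved(1024, 50, 100));
}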