From 061073f2e740bfae3a576deab05feda962eb1851 Mon Sep 17 00:00:00 2001
From: Dónal Murray
Date: Mon, 29 Apr 2024 17:45:38 +0100
Subject: [PATCH] Release Kusama Coretime 1.2.3 (#293)

Cherry-pick #286 onto the 1.2.2 release for a clean 1.2.3 release. This
release is the one proposed in
https://github.com/polkadot-fellows/runtimes/issues/288

The changelog from main is clipped to only include the relevant changes
from the cherry-picked commit.

- A migration is added to onboard the People Chain without the usual
  two-sale-boundary delay that came with the shift to coretime.
- Another migration is added to clean up an outdated assignment left in
  state from the old sale start, before the leases were added.

More info is in the changelog of this PR and in PR #286.

- [ ] Does not require a CHANGELOG entry

---------

Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
---
 CHANGELOG.md                                   |  10 +
 .../coretime/coretime-kusama/src/lib.rs        |   5 +-
 .../coretime-kusama/src/migrations.rs          | 347 +++++++++---------
 3 files changed, 192 insertions(+), 170 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b6563ceb0e..001407c027 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,16 @@ Changelog for the runtimes governed by the Polkadot Fellowship.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
+## [1.2.3] 29.04.2024
+
+### Added
+
+- Add migration to Kusama Coretime to onboard People Chain without long delay ([polkadot-fellows/runtimes#286](https://github.com/polkadot-fellows/runtimes/pull/286))
+
+### Fixed
+
+- Clean up outdated assignment in Kusama Coretime Chain state ([polkadot-fellows/runtimes#286](https://github.com/polkadot-fellows/runtimes/pull/286))
+
 ## [1.2.2] 20.04.2024
 
 ### Fixed
diff --git a/system-parachains/coretime/coretime-kusama/src/lib.rs b/system-parachains/coretime/coretime-kusama/src/lib.rs
index 0da38ad843..47715b2370 100644
--- a/system-parachains/coretime/coretime-kusama/src/lib.rs
+++ b/system-parachains/coretime/coretime-kusama/src/lib.rs
@@ -109,7 +109,8 @@ pub type UncheckedExtrinsic =
 /// Migrations to apply on runtime upgrade.
 pub type Migrations = (
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
-	migrations::bootstrapping::ImportLeases,
+	migrations::bootstrapping::RemoveOutdatedPoolAssignment,
+	migrations::bootstrapping::OnboardPeople,
 );
 
 /// Executive: handles dispatch to the various modules.
@@ -133,7 +134,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("coretime-kusama"),
 	impl_name: create_runtime_str!("coretime-kusama"),
 	authoring_version: 1,
-	spec_version: 1_002_002,
+	spec_version: 1_002_003,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 0,
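Both new migrations are plain `OnRuntimeUpgrade` types. `frame_support` implements the trait for tuples and runs the members in the order they are listed, so `RemoveOutdatedPoolAssignment` executes before `OnboardPeople`. A minimal, self-contained sketch of that ordering; the `First`/`Second` types are illustrative and not part of this patch:

    use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

    struct First;
    struct Second;

    impl OnRuntimeUpgrade for First {
        fn on_runtime_upgrade() -> Weight {
            println!("runs first");
            Weight::zero()
        }
    }

    impl OnRuntimeUpgrade for Second {
        fn on_runtime_upgrade() -> Weight {
            println!("runs second");
            Weight::zero()
        }
    }

    fn main() {
        // Tuples of `OnRuntimeUpgrade` implementors also implement the trait and
        // execute left to right, which is how `Migrations` is run on upgrade.
        <(First, Second) as OnRuntimeUpgrade>::on_runtime_upgrade();
    }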
diff --git a/system-parachains/coretime/coretime-kusama/src/migrations.rs b/system-parachains/coretime/coretime-kusama/src/migrations.rs
index 94e8903b09..68cf0ecec6 100644
--- a/system-parachains/coretime/coretime-kusama/src/migrations.rs
+++ b/system-parachains/coretime/coretime-kusama/src/migrations.rs
@@ -14,99 +14,195 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-/// The XCM Transact which was meant to set the leases as part of the Kusama relay runtime upgrade
-/// did not have enough weight. Therefore the leases were not migrated.
+/// The Kusama Coretime chain had some launch issues. These migrations clean up state and enable
+/// immediate onboarding of system parachains.
 ///
-/// This migration populates the leases and restarts the sale from whichever timeslice it runs.
-///
-/// This does not affect storage structure, only values.
+/// None of these migrations affect storage structure, only values.
 pub mod bootstrapping {
 	use crate::{weights, Runtime, RuntimeOrigin};
 	use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade};
 	#[cfg(feature = "try-runtime")]
+	use pallet_broker::StatusRecord;
 	use pallet_broker::{
-		AllowedRenewalId, AllowedRenewalRecord, AllowedRenewals, Configuration,
+		AllowedRenewals,
 		CoreAssignment::{Pool, Task},
-		CoreMask, LeaseRecordItem, SaleInfo, SaleInfoRecordOf, Schedule, ScheduleItem, Workplan,
+		CoreIndex, CoreMask, Leases, Reservations, SaleInfo, Schedule, ScheduleItem, Status,
+		Timeslice, WeightInfo, Workplan,
 	};
-	use pallet_broker::{Leases, WeightInfo};
 	#[cfg(feature = "try-runtime")]
 	use sp_runtime::TryRuntimeError;
 	#[cfg(feature = "try-runtime")]
 	use sp_std::vec::Vec;
 
 	/// The log target.
-	const TARGET: &str = "runtime::bootstrapping::import-leases";
+	const TARGET: &str = "runtime::bootstrapping::onboard-people";
+
+	// The key in Workplan with the outdated assignment.
+	const WORKPLAN_KEY: (Timeslice, CoreIndex) = (289960, 4);
 
-	// Alias into the broker weights for this runtime.
+	// Alias to the broker weights for this runtime.
 	type BrokerWeights = weights::pallet_broker::WeightInfo<Runtime>;
+	type RuntimeDbWeight = <Runtime as frame_system::Config>::DbWeight;
 
-	pub struct ImportLeases;
+	/// This migration cleans up an outdated pool assignment in state from the update to Kusama
+	/// Coretime 1002002.
+	pub struct RemoveOutdatedPoolAssignment;
 
-	impl OnRuntimeUpgrade for ImportLeases {
+	impl OnRuntimeUpgrade for RemoveOutdatedPoolAssignment {
 		fn on_runtime_upgrade() -> Weight {
-			// This migration contains hardcoded values only relevant to Kusama Coretime
-			// 1002000 before it has any leases. These checks could be tightened.
-			if Leases::<Runtime>::decode_len().unwrap_or(0) > 0 {
-				// Already has leases, bail
+			let schedule_pool = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Pool,
+			}]));
+			if Workplan::<Runtime>::get(WORKPLAN_KEY) != Some(schedule_pool) {
+				// Erroneous pool core assignment is not in state. Bailing.
 				log::error!(target: TARGET, "This migration includes hardcoded values not relevant to this runtime. Bailing.");
-				return <Runtime as frame_system::Config>::DbWeight::get().reads(1);
+				return RuntimeDbWeight::get().reads(1);
 			}
 
-			for (para_id, end) in LEASES {
-				match pallet_broker::Pallet::<Runtime>::set_lease(
-					RuntimeOrigin::root(),
-					para_id,
-					end,
-				) {
-					Ok(_) =>
-						log::info!(target: TARGET, "Importing lease for parachain {}", &para_id),
-					Err(_) =>
-						log::error!(target: TARGET, "Importing lease for parachain {} failed!", &para_id),
-				}
-			}
+			// Overwrite the outdated pool core assignment to keep parachain 2000 on core.
+			let schedule_2000 = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Task(2000),
+			}]));
+			Workplan::<Runtime>::insert(WORKPLAN_KEY, schedule_2000);
+
+			log::info!(target: TARGET, "Outdated Workplan entry has been overwritten.");
+
+			RuntimeDbWeight::get().reads(1).saturating_add(RuntimeDbWeight::get().writes(1))
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
+			let schedule_pool = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Pool,
+			}]));
+			if Workplan::<Runtime>::get(WORKPLAN_KEY) != Some(schedule_pool) {
+				return Ok(Vec::new())
+			}
+			let sale_info = SaleInfo::<Runtime>::get().unwrap();
+			Ok(sale_info.encode())
+		}
 
-			// The values used in referendum 375 included 52 cores. Replaying this here shifts the
-			// start of the sale, while crucially populating the workplan with the leases and
-			// recalculating the number of cores to be offered. However, there are 4 system
-			// parachains + 1 pool core + 47 leases + 3 cores for the open market, therefore we need
-			// to start sales with 55 cores.
-			let core_count = pallet_broker::Reservations::<Runtime>::decode_len().unwrap_or(0)
-				as u16 + pallet_broker::Leases::<Runtime>::decode_len().unwrap_or(0)
-				as u16 + 3;
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
+			if state.is_empty() {
+				return Ok(())
+			}
+			log::info!(target: TARGET, "Checking migration.");
+
+			// Check that cores 0-4 are now all reassigned to themselves at the end of the original
+			// period 0, before sales were restarted.
+			let expected_assignments = [Task(1000), Task(1001), Task(1002), Task(1005), Task(2000)];
+			for (core, assignment) in expected_assignments.into_iter().enumerate() {
+				assert_eq!(
+					Workplan::<Runtime>::get((289960, core as u16)),
+					Some(Schedule::truncate_from(Vec::from([ScheduleItem {
+						mask: CoreMask::complete(),
+						assignment
+					}])))
+				);
+			}
+
+			// There are no more surprise entries in the Workplan - the only cores which have
+			// reassignments before start_sales kicks in are the five checked above.
+			assert_eq!(
+				Workplan::<Runtime>::iter_keys()
+					.filter(|(timeslice, _)| *timeslice != 290808)
+					.count(),
+				5
+			);
+
+			Ok(())
+		}
+	}
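The two hooks above are compiled only with the `try-runtime` feature: `pre_upgrade` snapshots the encoded `SaleInfo` (or returns an empty vector to signal "nothing to do"), and `post_upgrade` receives those exact bytes back after the migration has run. A sketch of the order in which a try-runtime dry run drives the hooks, under the standard `OnRuntimeUpgrade` trait; the `dry_run` helper is illustrative, not a frame_support API:

    use frame_support::traits::OnRuntimeUpgrade;
    use sp_runtime::TryRuntimeError;

    // Requires frame_support to be built with the `try-runtime` feature, which
    // is what exposes `pre_upgrade`/`post_upgrade` on the trait.
    #[cfg(feature = "try-runtime")]
    fn dry_run<M: OnRuntimeUpgrade>() -> Result<(), TryRuntimeError> {
        // 1. Snapshot whatever state the checks will need afterwards.
        let state = M::pre_upgrade()?;
        // 2. Apply the migration itself.
        M::on_runtime_upgrade();
        // 3. Re-read storage and assert the invariants against the snapshot.
        M::post_upgrade(state)
    }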
 
+	/// The People Chain should be onboarded ASAP to Kusama; however, the reserve extrinsic takes
+	/// two sale-period boundaries to actually put new reservations on core. This migration adds
+	/// the People Chain immediately.
+	///
+	/// This is achieved in three steps:
+	/// 1. Reserve a core for People (from period 2)
+	/// 2. Add People Chain to the workplan for period 1
+	/// 3. Add People Chain to the workplan for the remainder of period 0
+	pub struct OnboardPeople;
+
+	impl OnRuntimeUpgrade for OnboardPeople {
 		fn on_runtime_upgrade() -> Weight {
+			// Make sure the People Chain is not already reserved.
+			let schedule_people = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Task(1004),
+			}]));
+			if Reservations::<Runtime>::get().iter().any(|res| *res == schedule_people) {
+				log::error!(target: TARGET, "The people chain is already reserved. Bailing.");
+				return RuntimeDbWeight::get().reads(1);
+			}
+
+			let next_period = SaleInfo::<Runtime>::get()
+				.map(|sale_info| sale_info.region_begin)
+				.expect("Sales have started on Kusama.");
+
+			// Request an extra core for the People Chain.
+			let core_count = Reservations::<Runtime>::decode_len().unwrap_or(0) as u16 +
+				Leases::<Runtime>::decode_len().unwrap_or(0) as u16 +
+				AllowedRenewals::<Runtime>::iter_keys()
+					.filter(|renewal| renewal.when >= next_period)
+					.count() as u16 + 4;
 			match pallet_broker::Pallet::<Runtime>::request_core_count(
 				RuntimeOrigin::root(),
 				core_count,
 			) {
-				Ok(_) => log::info!(target: TARGET, "Request for 55 cores sent."),
-				Err(_) => log::error!(target: TARGET, "Request for 55 cores failed to send."),
+				Ok(_) => log::info!(target: TARGET, "Request for 56 cores sent."),
+				Err(_) => log::error!(target: TARGET, "Request for 56 cores failed to send."),
 			}
-			match pallet_broker::Pallet::<Runtime>::start_sales(
+
+			// The People Chain should be assigned the new core to avoid clashes with the cores
+			// sold in period 0.
+			let people_core = core_count.saturating_sub(1);
+
+			// 1. Schedule the People Chain for period 2 and beyond.
+			let schedule_people = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Task(1004),
+			}]));
+			match pallet_broker::Pallet::<Runtime>::reserve(
 				RuntimeOrigin::root(),
-				5_000_000_000_000,
-				core_count,
+				schedule_people.clone(),
 			) {
-				Ok(_) => log::info!(target: TARGET, "Sales started"),
-				Err(_) => log::error!(target: TARGET, "Start sales failed!"),
+				Ok(_) => log::info!(target: TARGET, "People Chain reserved"),
+				Err(_) => log::error!(target: TARGET, "People Chain reservation failed!"),
 			}
 
-			// Weight for setting every lease and starting the sales, plus one read for leases
-			// check.
-			BrokerWeights::set_lease()
-				.saturating_mul(LEASES.len() as u64)
-				.saturating_add(BrokerWeights::request_core_count(55))
-				.saturating_add(BrokerWeights::start_sales(55))
-				.saturating_add(<Runtime as frame_system::Config>::DbWeight::get().reads(1))
+			// 2. Schedule the People Chain for period 1.
+			Workplan::<Runtime>::insert((next_period, people_core), schedule_people.clone());
+
+			// 3. Schedule the People Chain for the rest of period 0. Take the timeslice after the
+			// next tick so the core definitely gets processed.
+			let now_ish = Status::<Runtime>::get()
+				.map(|status| status.last_committed_timeslice.saturating_add(2))
+				.expect("Sales have started on Kusama.");
+			Workplan::<Runtime>::insert((now_ish, people_core), schedule_people);
+
+			BrokerWeights::reserve()
+				.saturating_add(BrokerWeights::request_core_count(56))
+				.saturating_add(RuntimeDbWeight::get().reads(6))
+				.saturating_add(RuntimeDbWeight::get().writes(2))
 		}
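For concreteness, on the Kusama state this release targets, the dynamically computed `core_count` above decomposes as sketched below. The decomposition follows from the 1.2.2 migration's comments (4 system parachains + 1 pool core + 47 leases + 3 open-market cores = 55) and the try-runtime checks later in this file; only the final `+ 4` (3 open-market cores plus the new People Chain core) is hardcoded in the migration:

    fn main() {
        // Illustrative arithmetic only; the migration derives each term from storage.
        let reservations = 5u16; // 4 system parachains + 1 on-demand pool core
        let leases = 35u16; // the original 47 leases minus the 12 that expired at the restarted sale
        let renewals = 12u16; // those 12 expired leases, still renewable into the next period
        let extra = 4u16; // 3 open-market cores + 1 new People Chain core
        assert_eq!(reservations + leases + renewals + extra, 56);
    }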
 
 		#[cfg(feature = "try-runtime")]
 		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
-			if Leases::<Runtime>::decode_len().unwrap_or(0) > 0 {
+			let schedule_people = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Task(1004),
+			}]));
+			if Reservations::<Runtime>::get().iter().any(|res| *res == schedule_people) {
 				return Ok(Vec::new())
 			}
-			let sale_info = SaleInfo::<Runtime>::get().unwrap();
-			Ok(sale_info.encode())
+			let status = Status::<Runtime>::get().unwrap();
+			Ok(status.encode())
 		}
 
 		#[cfg(feature = "try-runtime")]
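`pre_upgrade` can only talk to `post_upgrade` through the opaque `Vec<u8>` it returns; `OnboardPeople` uses it to carry the encoded pre-upgrade `StatusRecord`. A standalone sketch of that round trip with `parity-scale-codec` (requires its `derive` feature; the struct is a stand-in carrying just the two fields the checks use, and the values are made up):

    use parity_scale_codec::{Decode, Encode};

    #[derive(Encode, Decode, PartialEq, Debug)]
    struct ExampleStatus {
        core_count: u16,
        last_committed_timeslice: u32,
    }

    fn main() {
        let before = ExampleStatus { core_count: 55, last_committed_timeslice: 289_000 };
        // What pre_upgrade returns...
        let state: Vec<u8> = before.encode();
        // ...and what post_upgrade reconstructs from the same bytes.
        let after = ExampleStatus::decode(&mut &state[..]).unwrap();
        assert_eq!(before, after);
    }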
@@ -114,140 +210,55 @@ pub mod bootstrapping {
 			if state.is_empty() {
 				return Ok(())
 			}
-			let prev_sale_info = <SaleInfoRecordOf<Runtime>>::decode(&mut &state[..]).unwrap();
-			log::info!(target: TARGET, "Checking migration.");
-			let sale_info = SaleInfo::<Runtime>::get().unwrap();
-			let now = frame_system::Pallet::<Runtime>::block_number();
-			let config = Configuration::<Runtime>::get().unwrap();
-
-			// Check the sale start has changed as expected and the cores_offered is the correct
-			// number.
-			assert_eq!(sale_info.sale_start, now + config.interlude_length);
-			assert!(sale_info.region_begin > prev_sale_info.region_begin);
-			assert_eq!(sale_info.cores_offered, 3);
-
-			// The workplan entries start from the region begin reported by the new SaleInfo.
-			let workplan_start = sale_info.region_begin;
-
-			// Check the reservations are still in the workplan out of an abundance of caution.
-			for (core_id, task) in
-				[Task(1000), Task(1001), Task(1002), Task(1005), Pool].into_iter().enumerate()
-			{
-				assert_eq!(
-					Workplan::<Runtime>::get((workplan_start, core_id as u16)),
-					Some(Schedule::truncate_from(Vec::from([ScheduleItem {
-						mask: CoreMask::complete(),
-						assignment: task,
-					}])))
-				);
-			}
+			let prev_status = <StatusRecord>::decode(&mut &state[..]).unwrap();
 
-			// Because we also run start_sales, 12 expiring leases are removed from the original 47,
-			// leaving 35.
-			let leases = Leases::<Runtime>::get();
+			// The People Chain is reserved exactly once.
+			let schedule_people = Schedule::truncate_from(Vec::from([ScheduleItem {
+				mask: CoreMask::complete(),
+				assignment: Task(1004),
+			}]));
 			assert_eq!(
-				leases.len(),
-				LEASES.iter().filter(|(_, l)| sale_info.region_end <= *l).count()
+				Reservations::<Runtime>::get()
+					.iter()
+					.filter(|&res| *res == schedule_people.clone())
+					.count(),
+				1
 			);
 
-			// Iterate through hardcoded leases and check they're all correctly in state (leases or
-			// allowedrenewals) and scheduled in the workplan.
-			for (i, (para_id, until)) in LEASES.iter().enumerate() {
-				// Add the system parachains and pool core as an offset - these should come before
-				// the leases.
-				let core_id = i as u16 + 5;
-				// This is the entry found in Workplan and AllowedRenewal
-				let workload = Schedule::truncate_from(Vec::from([ScheduleItem {
-					mask: CoreMask::complete(),
-					assignment: Task(*para_id),
-				}]));
-
-				// Check that the 12 who no longer have a lease can renew.
-				if !leases.contains(&LeaseRecordItem { until: *until, task: *para_id }) {
-					assert_eq!(
-						AllowedRenewals::<Runtime>::get(AllowedRenewalId {
-							core: core_id,
-							when: sale_info.region_end,
-						}),
-						Some(AllowedRenewalRecord {
-							price: 5_000_000_000_000,
-							completion: pallet_broker::CompletionStatus::Complete(workload.clone())
-						})
-					);
-				}
-				// They should all be in the workplan for next sale.
-				assert_eq!(Workplan::<Runtime>::get((workplan_start, core_id)), Some(workload));
-			}
+			// And it is in the Workplan for periods 0 and 1.
+			assert_eq!(
+				Workplan::<Runtime>::get((prev_status.last_committed_timeslice + 2, 55)),
+				Some(schedule_people.clone())
+			);
+
+			let next_period =
+				SaleInfo::<Runtime>::get().map(|sale_info| sale_info.region_begin).unwrap();
 
-			// Ensure we have requested the correct number of events.
+			assert_eq!(Workplan::<Runtime>::get((next_period, 55)), Some(schedule_people.clone()));
+
+			// Ensure we have requested the correct number of cores.
 			assert!(frame_system::Pallet::<Runtime>::read_events_no_consensus().any(|e| {
 				match e.event {
 					crate::RuntimeEvent::Broker(
 						pallet_broker::Event::<Runtime>::CoreCountRequested { core_count },
 					) => {
-						log::info!("{core_count:?}");
+						log::info!(target: TARGET, "Reserved {core_count:?} cores.");
 
-						core_count == 55
+						// Ensure that both of these are correct as a sanity check, since we
+						// hardcode core 55 elsewhere.
+						core_count == prev_status.core_count + 1 && core_count == 56
 					},
 					_ => false,
 				}
 			}));
 
+			// And ensure this core isn't overwritten at any stage; it should only have the two
+			// entries in the workplan that we just checked.
+			assert_eq!(Workplan::<Runtime>::iter_keys().filter(|(_, core)| *core == 55).count(), 2);
+
 			Ok(())
 		}
 	}
-
-	// Hardcoded para ids and their end timeslice.
-	// Calculated using https://github.com/seadanda/coretime-scripts/blob/main/get_leases.py
-	const LEASES: [(u32, u32); 47] = [
-		(2000, 340200),
-		(2001, 302400),
-		(2004, 332640),
-		(2007, 317520),
-		(2011, 325080),
-		(2012, 309960),
-		(2015, 287280),
-		(2023, 309960),
-		(2024, 309960),
-		(2048, 302400),
-		(2084, 340200),
-		(2085, 294840),
-		(2087, 340200),
-		(2088, 287280),
-		(2090, 340200),
-		(2092, 287280),
-		(2095, 332640),
-		(2096, 332640),
-		(2105, 325080),
-		(2106, 325080),
-		(2110, 317520),
-		(2113, 332640),
-		(2114, 317520),
-		(2119, 340200),
-		(2121, 332640),
-		(2123, 294840),
-		(2124, 287280),
-		(2125, 294840),
-		(2222, 302400),
-		(2233, 294840),
-		(2236, 317520),
-		(2239, 332640),
-		(2241, 325080),
-		(2274, 294840),
-		(2275, 294840),
-		(2281, 302400),
-		(3334, 309960),
-		(3336, 317520),
-		(3338, 317520),
-		(3339, 325080),
-		(3340, 325080),
-		(3343, 317520),
-		(3344, 340200),
-		(3345, 325080),
-		(3347, 287280),
-		(3348, 287280),
-		(3350, 340200),
-	];
 }
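Taken together, the two migrations write exactly three `Workplan` entries: parachain 2000 re-pinned to core 4 at the hardcoded timeslice, and the People Chain (para 1004) on the new core 55 twice, once for the remainder of period 0 and once from the next period boundary. A standalone model of that end state; the `BTreeMap` stands in for the storage map, and apart from the hardcoded 289,960 the timeslice values are illustrative:

    use std::collections::BTreeMap;

    type Timeslice = u32;
    type CoreIndex = u16;

    /// Simplified stand-in for `pallet_broker::CoreAssignment`.
    #[derive(Debug, PartialEq)]
    enum Assignment {
        Task(u32),
    }

    fn main() {
        let mut workplan: BTreeMap<(Timeslice, CoreIndex), Assignment> = BTreeMap::new();
        // RemoveOutdatedPoolAssignment: keep parachain 2000 on core 4.
        workplan.insert((289_960, 4), Assignment::Task(2000));
        // OnboardPeople: People Chain on core 55 for the rest of period 0
        // (last_committed_timeslice + 2; illustrative value here)...
        workplan.insert((289_962, 55), Assignment::Task(1004));
        // ...and again from the next period boundary (illustrative value).
        workplan.insert((290_808, 55), Assignment::Task(1004));
        // Core 55 carries exactly the two entries the try-runtime checks assert on.
        assert_eq!(workplan.values().filter(|a| **a == Assignment::Task(1004)).count(), 2);
    }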