diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 1c268c5914..cde067f3e8 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -9,7 +9,8 @@ use crate::schema::{ hw_baseboard_id, inv_caboose, inv_collection, inv_collection_error, inv_omicron_zone, inv_omicron_zone_nic, inv_physical_disk, inv_root_of_trust, inv_root_of_trust_page, inv_service_processor, - inv_sled_agent, inv_sled_omicron_zones, sw_caboose, sw_root_of_trust_page, + inv_sled_agent, inv_sled_omicron_zones, inv_zpool, sw_caboose, + sw_root_of_trust_page, }; use crate::PhysicalDiskKind; use crate::{ @@ -699,6 +700,43 @@ impl From for nexus_types::inventory::PhysicalDisk { } } +/// See [`nexus_types::inventory::Zpool`]. +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = inv_zpool)] +pub struct InvZpool { + pub inv_collection_id: Uuid, + pub time_collected: DateTime, + pub id: Uuid, + pub sled_id: Uuid, + pub total_size: ByteCount, +} + +impl InvZpool { + pub fn new( + inv_collection_id: Uuid, + sled_id: Uuid, + zpool: &nexus_types::inventory::Zpool, + ) -> Self { + Self { + inv_collection_id, + time_collected: zpool.time_collected, + id: zpool.id, + sled_id, + total_size: zpool.total_size.into(), + } + } +} + +impl From for nexus_types::inventory::Zpool { + fn from(pool: InvZpool) -> Self { + Self { + time_collected: pool.time_collected, + id: pool.id, + total_size: *pool.total_size, + } + } +} + /// See [`nexus_types::inventory::OmicronZonesFound`]. #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_sled_omicron_zones)] diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index c31ce16775..2c53dc26a7 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion; /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(43, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(44, 0, 0); table! { disk (id) { @@ -960,8 +960,6 @@ table! { sled_id -> Uuid, physical_disk_id -> Uuid, - - total_size -> Int8, } } @@ -1375,6 +1373,16 @@ table! { } } +table! { + inv_zpool (inv_collection_id, sled_id, id) { + inv_collection_id -> Uuid, + time_collected -> Timestamptz, + id -> Uuid, + sled_id -> Uuid, + total_size -> Int8, + } +} + table! { inv_sled_omicron_zones (inv_collection_id, sled_id) { inv_collection_id -> Uuid, diff --git a/nexus/db-model/src/zpool.rs b/nexus/db-model/src/zpool.rs index 21f59e0347..1d129d4c57 100644 --- a/nexus/db-model/src/zpool.rs +++ b/nexus/db-model/src/zpool.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{ByteCount, Dataset, Generation}; +use super::{Dataset, Generation}; use crate::collection::DatastoreCollectionConfig; use crate::schema::{dataset, zpool}; use chrono::{DateTime, Utc}; @@ -26,26 +26,16 @@ pub struct Zpool { // The physical disk to which this Zpool is attached. pub physical_disk_id: Uuid, - - // TODO: In the future, we may expand this structure to include - // size, allocation, and health information. 
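The new `InvZpool` model above follows the usual inventory pattern: the collection and sled IDs are context keys supplied by the caller in `new()`, and the `From<InvZpool>` impl drops them again when converting back to the domain type. A rough standalone sketch of that round-trip shape, using illustrative stand-in types rather than the Omicron ones:

```rust
// Stand-ins for nexus_types::inventory::Zpool and the InvZpool row above.
#[derive(Clone, Debug, PartialEq)]
struct Zpool {
    id: u64,
    total_size: u64,
}

#[derive(Clone, Debug)]
struct InvZpoolRow {
    // Context keys supplied at insert time, not carried by the domain type.
    inv_collection_id: u64,
    sled_id: u64,
    id: u64,
    total_size: u64,
}

impl InvZpoolRow {
    fn new(inv_collection_id: u64, sled_id: u64, z: &Zpool) -> Self {
        Self { inv_collection_id, sled_id, id: z.id, total_size: z.total_size }
    }
}

impl From<InvZpoolRow> for Zpool {
    fn from(row: InvZpoolRow) -> Self {
        // The context keys are dropped on the way back out.
        Zpool { id: row.id, total_size: row.total_size }
    }
}

fn main() {
    let pool = Zpool { id: 7, total_size: 1 << 30 };
    let row = InvZpoolRow::new(42, 3, &pool);
    assert_eq!(Zpool::from(row), pool);
}
```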
- pub total_size: ByteCount, } impl Zpool { - pub fn new( - id: Uuid, - sled_id: Uuid, - physical_disk_id: Uuid, - total_size: ByteCount, - ) -> Self { + pub fn new(id: Uuid, sled_id: Uuid, physical_disk_id: Uuid) -> Self { Self { identity: ZpoolIdentity::new(id), time_deleted: None, rcgen: Generation::new(), sled_id, physical_disk_id, - total_size, } } } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 792f8f81a4..292f13354f 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -229,12 +229,7 @@ mod test { // Create a fake zpool that backs our fake datasets. let zpool_id = Uuid::new_v4(); - let zpool = Zpool::new( - zpool_id, - sled_id, - Uuid::new_v4(), - (1 << 30).try_into().unwrap(), - ); + let zpool = Zpool::new(zpool_id, sled_id, Uuid::new_v4()); datastore.zpool_upsert(zpool).await.expect("failed to upsert zpool"); // Inserting a new dataset should succeed. diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 860725cc08..67d5684bbf 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -45,6 +45,7 @@ use nexus_db_model::InvRotPage; use nexus_db_model::InvServiceProcessor; use nexus_db_model::InvSledAgent; use nexus_db_model::InvSledOmicronZones; +use nexus_db_model::InvZpool; use nexus_db_model::RotPageWhichEnum; use nexus_db_model::SledRole; use nexus_db_model::SledRoleEnum; @@ -137,6 +138,18 @@ impl DataStore { }) .collect(); + // Pull zpools out of all sled agents + let zpools: Vec<_> = collection + .sled_agents + .iter() + .flat_map(|(sled_id, sled_agent)| { + sled_agent + .zpools + .iter() + .map(|pool| InvZpool::new(collection_id, *sled_id, pool)) + }) + .collect(); + // Partition the sled agents into those with an associated baseboard id // and those without one. We handle these pretty differently. let (sled_agents_baseboards, sled_agents_no_baseboards): ( @@ -670,6 +683,25 @@ impl DataStore { } } + // Insert rows for all the zpools we found. + { + use db::schema::inv_zpool::dsl; + + let batch_size = SQL_BATCH_SIZE.get().try_into().unwrap(); + let mut zpools = zpools.into_iter(); + loop { + let some_zpools = + zpools.by_ref().take(batch_size).collect::>(); + if some_zpools.is_empty() { + break; + } + let _ = diesel::insert_into(dsl::inv_zpool) + .values(some_zpools) + .execute_async(&conn) + .await?; + } + } + // Insert rows for the sled agents that we found. In practice, we'd // expect these to all have baseboards (if using Oxide hardware) or // none have baseboards (if not). @@ -1470,6 +1502,34 @@ impl DataStore { disks }; + // Mapping of "Sled ID" -> "All zpools reported by that sled" + let zpools: BTreeMap> = { + use db::schema::inv_zpool::dsl; + + let mut zpools = + BTreeMap::>::new(); + let mut paginator = Paginator::new(batch_size); + while let Some(p) = paginator.next() { + let batch = paginated_multicolumn( + dsl::inv_zpool, + (dsl::sled_id, dsl::id), + &p.current_pagparams(), + ) + .filter(dsl::inv_collection_id.eq(id)) + .select(InvZpool::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + paginator = p.found_batch(&batch, &|row| (row.sled_id, row.id)); + for zpool in batch { + zpools.entry(zpool.sled_id).or_default().push(zpool.into()); + } + } + zpools + }; + // Collect the unique baseboard ids referenced by SPs, RoTs, and Sled // Agents. 
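The insert path above drains the new `zpools` vector in `SQL_BATCH_SIZE` chunks so that each generated `INSERT` stays bounded. A standalone sketch of that chunking idiom, with a placeholder batch size and `u32` payload in place of the real rows:

```rust
fn main() {
    let batch_size = 3;
    let rows: Vec<u32> = (0..8).collect();
    let mut it = rows.into_iter();
    loop {
        // `by_ref()` takes up to `batch_size` items without consuming the iterator.
        let chunk: Vec<u32> = it.by_ref().take(batch_size).collect();
        if chunk.is_empty() {
            break;
        }
        println!("would insert {} rows: {:?}", chunk.len(), chunk);
    }
}
```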
let baseboard_id_ids: BTreeSet<_> = sps @@ -1577,6 +1637,10 @@ impl DataStore { .get(&sled_id) .map(|disks| disks.to_vec()) .unwrap_or_default(), + zpools: zpools + .get(&sled_id) + .map(|zpools| zpools.to_vec()) + .unwrap_or_default(), }; Ok((sled_id, sled_agent)) }) diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index b2b564cc09..ab00d876b8 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -661,17 +661,59 @@ mod test { sled_id: Uuid, physical_disk_id: Uuid, ) -> Uuid { - let zpool_id = Uuid::new_v4(); - let zpool = Zpool::new( - zpool_id, + let zpool_id = create_test_zpool_not_in_inventory( + datastore, sled_id, physical_disk_id, - test_zpool_size().into(), - ); + ) + .await; + + add_test_zpool_to_inventory(datastore, zpool_id, sled_id).await; + + zpool_id + } + + // Creates a test zpool, returns its UUID. + // + // However, this helper doesn't add the zpool to the inventory just yet. + async fn create_test_zpool_not_in_inventory( + datastore: &DataStore, + sled_id: Uuid, + physical_disk_id: Uuid, + ) -> Uuid { + let zpool_id = Uuid::new_v4(); + let zpool = Zpool::new(zpool_id, sled_id, physical_disk_id); datastore.zpool_upsert(zpool).await.unwrap(); zpool_id } + // Adds a test zpool into the inventory, with a randomly generated + // collection UUID. + async fn add_test_zpool_to_inventory( + datastore: &DataStore, + zpool_id: Uuid, + sled_id: Uuid, + ) { + use db::schema::inv_zpool::dsl; + + let inv_collection_id = Uuid::new_v4(); + let time_collected = Utc::now(); + let inv_pool = nexus_db_model::InvZpool { + inv_collection_id, + time_collected, + id: zpool_id, + sled_id, + total_size: test_zpool_size().into(), + }; + diesel::insert_into(dsl::inv_zpool) + .values(inv_pool) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) + .await + .unwrap(); + } + fn create_test_disk_create_params( name: &str, size: ByteCount, @@ -1156,6 +1198,103 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_region_allocation_only_operates_on_zpools_in_inventory() { + let logctx = dev::test_setup_log( + "test_region_allocation_only_operates_on_zpools_in_inventory", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a sled... + let sled_id = create_test_sled(&datastore).await; + + // ... and a disk on that sled... + let physical_disk_id = create_test_physical_disk( + &datastore, + &opctx, + sled_id, + PhysicalDiskKind::U2, + ) + .await; + + // Create enough zpools for region allocation to succeed + let zpool_ids: Vec = stream::iter(0..REGION_REDUNDANCY_THRESHOLD) + .then(|_| { + create_test_zpool_not_in_inventory( + &datastore, + sled_id, + physical_disk_id, + ) + }) + .collect() + .await; + + let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); + + // 1 dataset per zpool + stream::iter(zpool_ids.clone()) + .then(|zpool_id| { + let id = Uuid::new_v4(); + let dataset = Dataset::new( + id, + zpool_id, + bogus_addr, + DatasetKind::Crucible, + ); + let datastore = datastore.clone(); + async move { + datastore.dataset_upsert(dataset).await.unwrap(); + id + } + }) + .collect::>() + .await; + + // Allocate regions from the datasets for this volume. 
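When a collection is read back (the `zpools.get(&sled_id)` lookup in `inventory.rs` above), rows are first grouped per sled with `entry().or_default()` and each sled's list is then materialized with an empty default, so a sled with no observed zpools simply reports none. A standalone sketch of that grouping, using placeholder key and value types:

```rust
use std::collections::BTreeMap;

fn main() {
    // Placeholder rows: (sled id, zpool name).
    let rows = [(1u32, "poolA"), (1, "poolB"), (2, "poolC")];

    let mut by_sled: BTreeMap<u32, Vec<&str>> = BTreeMap::new();
    for (sled_id, pool) in rows {
        by_sled.entry(sled_id).or_default().push(pool);
    }

    // Sled 3 never reported a zpool, so it gets an empty Vec.
    for sled_id in [1u32, 2, 3] {
        let pools = by_sled.get(&sled_id).map(|p| p.to_vec()).unwrap_or_default();
        println!("sled {sled_id}: {pools:?}");
    }
}
```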
+ let params = create_test_disk_create_params( + "disk1", + ByteCount::from_mebibytes_u32(500), + ); + let volume1_id = Uuid::new_v4(); + let err = datastore + .region_allocate( + &opctx, + volume1_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await + .unwrap_err(); + + let expected = "Not enough zpool space to allocate disks"; + assert!( + err.to_string().contains(expected), + "Saw error: \'{err}\', but expected \'{expected}\'" + ); + assert!(matches!(err, Error::InsufficientCapacity { .. })); + + // If we add the zpools to the inventory and try again, the allocation + // will succeed. + for zpool_id in zpool_ids { + add_test_zpool_to_inventory(&datastore, zpool_id, sled_id).await; + } + datastore + .region_allocate( + &opctx, + volume1_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await + .expect("Allocation should have worked after adding zpools to inventory"); + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_region_allocation_not_enough_zpools() { let logctx = diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 912565cb2c..52e0ce4d88 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -128,17 +128,16 @@ impl DataStore { let (blocks_per_extent, extent_count) = Self::get_crucible_allocation(&block_size, size); + let query = crate::db::queries::region_allocation::RegionAllocate::new( + volume_id, + block_size.to_bytes() as u64, + blocks_per_extent, + extent_count, + allocation_strategy, + ); + let conn = self.pool_connection_authorized(&opctx).await?; let dataset_and_regions: Vec<(Dataset, Region)> = - crate::db::queries::region_allocation::RegionAllocate::new( - volume_id, - block_size.to_bytes() as u64, - blocks_per_extent, - extent_count, - allocation_strategy, - ) - .get_results_async(&*self.pool_connection_authorized(&opctx).await?) - .await - .map_err(|e| { + query.get_results_async(&*conn).await.map_err(|e| { crate::db::queries::region_allocation::from_diesel(e) })?; diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index 5424c126eb..b894d5c509 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -46,7 +46,6 @@ impl DataStore { .set(( dsl::time_modified.eq(Utc::now()), dsl::sled_id.eq(excluded(dsl::sled_id)), - dsl::total_size.eq(excluded(dsl::total_size)), )), ) .insert_and_get_result_async( diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 96fa2058af..a657d21c97 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -17,7 +17,7 @@ use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId}; use diesel::result::Error as DieselError; use diesel::PgBinaryExpressionMethods; use diesel::{ - sql_types, BoolExpressionMethods, Column, CombineDsl, ExpressionMethods, + sql_types, BoolExpressionMethods, CombineDsl, ExpressionMethods, Insertable, IntoSql, JoinOnDsl, NullableExpressionMethods, QueryDsl, RunQueryDsl, }; @@ -316,7 +316,11 @@ impl CandidateZpools { // into a BigInt, not a Numeric. I welcome a better solution. 
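The `it_will_fit` change just below is the payoff of the schema move: with `zpool.total_size` gone, the allocation query compares projected usage against the most recently collected `inv_zpool` row instead. Roughly, the injected fragment behaves like this illustrative expansion (`old_zpool_usage` is the CTE computed earlier in the allocation query, and the numeric literal stands in for the computed size delta):

```sql
SELECT
    (old_zpool_usage.size_used + 10737418240) <=
        (SELECT total_size
           FROM omicron.public.inv_zpool
          WHERE inv_zpool.id = old_zpool_usage.pool_id
          ORDER BY inv_zpool.time_collected DESC
          LIMIT 1) AS it_will_fit
FROM old_zpool_usage;
```

Note that a zpool with no `inv_zpool` rows makes the scalar subquery return NULL, so the comparison never passes the capacity filter; that is the behavior exercised by the new `test_region_allocation_only_operates_on_zpools_in_inventory` test above.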
let it_will_fit = (old_zpool_usage::dsl::size_used + diesel::dsl::sql(&zpool_size_delta.to_string())) - .le(diesel::dsl::sql(zpool_dsl::total_size::NAME)); + .le(diesel::dsl::sql( + "(SELECT total_size FROM omicron.public.inv_zpool WHERE + inv_zpool.id = old_zpool_usage.pool_id + ORDER BY inv_zpool.time_collected DESC LIMIT 1)", + )); // We need to join on the sled table to access provision_state. let with_sled = sled_dsl::sled.on(zpool_dsl::sled_id.eq(sled_dsl::id)); diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs index 80db03f18b..2e482fcebf 100644 --- a/nexus/inventory/src/builder.rs +++ b/nexus/inventory/src/builder.rs @@ -26,6 +26,7 @@ use nexus_types::inventory::RotPageWhich; use nexus_types::inventory::RotState; use nexus_types::inventory::ServiceProcessor; use nexus_types::inventory::SledAgent; +use nexus_types::inventory::Zpool; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::sync::Arc; @@ -451,6 +452,7 @@ impl CollectionBuilder { return Ok(()); } }; + let time_collected = now_db_precision(); let sled = SledAgent { source: source.to_string(), sled_agent_address, @@ -459,9 +461,14 @@ impl CollectionBuilder { usable_hardware_threads: inventory.usable_hardware_threads, usable_physical_ram: inventory.usable_physical_ram, reservoir_size: inventory.reservoir_size, - time_collected: now_db_precision(), + time_collected, sled_id, disks: inventory.disks.into_iter().map(|d| d.into()).collect(), + zpools: inventory + .zpools + .into_iter() + .map(|z| Zpool::new(time_collected, z)) + .collect(), }; if let Some(previous) = self.sleds.get(&sled_id) { diff --git a/nexus/inventory/src/examples.rs b/nexus/inventory/src/examples.rs index 286ded7276..5cc6b687d4 100644 --- a/nexus/inventory/src/examples.rs +++ b/nexus/inventory/src/examples.rs @@ -313,6 +313,7 @@ pub fn representative() -> Representative { slot: 3, }, ]; + let zpools = vec![]; builder .found_sled_inventory( @@ -326,6 +327,7 @@ pub fn representative() -> Representative { }, sled_agent_client::types::SledRole::Gimlet, disks, + zpools, ), ) .unwrap(); @@ -351,6 +353,7 @@ pub fn representative() -> Representative { }, sled_agent_client::types::SledRole::Scrimlet, vec![], + vec![], ), ) .unwrap(); @@ -371,6 +374,7 @@ pub fn representative() -> Representative { }, sled_agent_client::types::SledRole::Gimlet, vec![], + vec![], ), ) .unwrap(); @@ -389,6 +393,7 @@ pub fn representative() -> Representative { sled_agent_client::types::Baseboard::Unknown, sled_agent_client::types::SledRole::Gimlet, vec![], + vec![], ), ) .unwrap(); @@ -486,6 +491,7 @@ pub fn sled_agent( baseboard: sled_agent_client::types::Baseboard, sled_role: sled_agent_client::types::SledRole, disks: Vec, + zpools: Vec, ) -> sled_agent_client::types::Inventory { sled_agent_client::types::Inventory { baseboard, @@ -496,5 +502,6 @@ pub fn sled_agent( usable_hardware_threads: 10, usable_physical_ram: ByteCount::from(1024 * 1024), disks, + zpools, } } diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 5e677b98be..6da24e4279 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -200,7 +200,6 @@ mod tests { zpool_name.id(), sled_id, Uuid::new_v4(), // physical_disk_id - (1 << 30).try_into().unwrap(), // total_size ); datastore .zpool_upsert(zpool) @@ -269,8 +268,7 @@ mod tests { let zpool = Zpool::new( new_zpool_id, sled_id, - Uuid::new_v4(), // physical_disk_id - (1 << 30).try_into().unwrap(), // 
total_size + Uuid::new_v4(), // physical_disk_id ); datastore .zpool_upsert(zpool) diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index 8e8283f37e..e224e3c6df 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -476,6 +476,7 @@ impl Sled { usable_hardware_threads: 10, usable_physical_ram: ByteCount::from(1024 * 1024), disks: vec![], + zpools: vec![], } }; @@ -558,6 +559,7 @@ impl Sled { usable_hardware_threads: inv_sled_agent.usable_hardware_threads, usable_physical_ram: inv_sled_agent.usable_physical_ram, disks: vec![], + zpools: vec![], }; Sled { diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 68f57d31f3..d387998f6a 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -562,6 +562,12 @@ impl Nexus { Some(rustls_cfg) } + // Called to trigger inventory collection. + pub(crate) fn activate_inventory_collection(&self) { + self.background_tasks + .activate(&self.background_tasks.task_inventory_collection); + } + // Called to hand off management of external servers to Nexus. pub(crate) async fn set_servers( &self, diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 9905df04ff..35f3337625 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -923,8 +923,7 @@ impl super::Nexus { // Trigger an inventory collection so that the newly added sled is known // about. - self.background_tasks - .activate(&self.background_tasks.task_inventory_collection); + self.activate_inventory_collection(); Ok(()) } diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 4ad8dc36ce..06e50f2ecd 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -264,12 +264,7 @@ impl super::Nexus { ) .fetch() .await?; - let zpool = db::model::Zpool::new( - id, - sled_id, - db_disk.uuid(), - info.size.into(), - ); + let zpool = db::model::Zpool::new(id, sled_id, db_disk.uuid()); self.db_datastore.zpool_upsert(zpool).await?; Ok(()) } diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 03470a4f3a..d143858bf4 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -30,7 +30,9 @@ use nexus_config::NexusConfig; use nexus_types::deployment::Blueprint; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::internal_api::params::ServiceKind; +use nexus_types::inventory::Collection; use omicron_common::address::IpRange; +use omicron_common::api::external::Error; use omicron_common::api::internal::shared::{ ExternalPortDiscovery, RackNetworkConfig, SwitchLocation, }; @@ -354,6 +356,17 @@ impl nexus_test_interface::NexusServer for Server { .unwrap(); } + async fn inventory_collect_and_get_latest_collection( + &self, + ) -> Result, Error> { + let nexus = &self.apictx.nexus; + + nexus.activate_inventory_collection(); + + let opctx = nexus.opctx_for_internal_api(); + nexus.datastore().inventory_get_latest_collection(&opctx).await + } + async fn close(mut self) { self.apictx .nexus diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index d9bb69276a..2e3428a1dd 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -34,6 +34,8 @@ use async_trait::async_trait; use nexus_config::NexusConfig; use nexus_types::deployment::Blueprint; +use nexus_types::inventory::Collection; +use omicron_common::api::external::Error; use slog::Logger; use std::net::{SocketAddr, SocketAddrV6}; use uuid::Uuid; @@ -91,5 +93,9 @@ pub trait NexusServer: Send + Sync + 'static { address: SocketAddrV6, ); + async fn 
inventory_collect_and_get_latest_collection( + &self, + ) -> Result, Error>; + async fn close(self); } diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 7331843000..f4b5d9fa46 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -32,13 +32,18 @@ use nexus_types::identity::Resource; use nexus_types::internal_api::params as internal_params; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Disk; +use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::NameOrId; use omicron_sled_agent::sim::SledAgent; +use omicron_test_utils::dev::poll::wait_for_condition; +use omicron_test_utils::dev::poll::CondCheckError; +use slog::debug; use std::net::IpAddr; use std::sync::Arc; +use std::time::Duration; use uuid::Uuid; pub async fn objects_list_page_authz( @@ -793,6 +798,56 @@ impl DiskTest { .await; } + let log = &cptestctx.logctx.log; + + // Wait until Nexus has successfully completed an inventory collection + // which includes this zpool + wait_for_condition( + || async { + let result = cptestctx + .server + .inventory_collect_and_get_latest_collection() + .await; + let log_result = match &result { + Ok(Some(_)) => Ok("found"), + Ok(None) => Ok("not found"), + Err(error) => Err(error), + }; + debug!( + log, + "attempt to fetch latest inventory collection"; + "result" => ?log_result, + ); + + match result { + Ok(None) => Err(CondCheckError::NotYet), + Ok(Some(c)) => { + let all_zpools = c + .sled_agents + .values() + .flat_map(|sled_agent| { + sled_agent.zpools.iter().map(|z| z.id) + }) + .collect::>(); + + if all_zpools.contains(&zpool.id) { + Ok(()) + } else { + Err(CondCheckError::NotYet) + } + } + Err(Error::ServiceUnavailable { .. }) => { + Err(CondCheckError::NotYet) + } + Err(error) => Err(CondCheckError::Failed(error)), + } + }, + &Duration::from_millis(50), + &Duration::from_secs(30), + ) + .await + .expect("expected to find inventory collection"); + self.zpools.push(zpool); } diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index b0afe51a76..40da26047b 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -367,6 +367,23 @@ impl From for PhysicalDisk { } } +/// A zpool reported by a sled agent. +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct Zpool { + pub time_collected: DateTime, + pub id: Uuid, + pub total_size: ByteCount, +} + +impl Zpool { + pub fn new( + time_collected: DateTime, + pool: sled_agent_client::types::InventoryZpool, + ) -> Zpool { + Zpool { time_collected, id: pool.id, total_size: pool.total_size } + } +} + /// Inventory reported by sled agent /// /// This is a software notion of a sled, distinct from an underlying baseboard. 
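The `DiskTest` change above polls with `wait_for_condition`, treating `Ok(None)` and `ServiceUnavailable` as "not yet" and anything else as a hard failure, until a collection containing the new zpool shows up. A simplified, synchronous stand-in for that polling helper (the real one lives in `omicron_test_utils::dev::poll` and is async):

```rust
use std::time::{Duration, Instant};

enum CondCheckError<E> {
    NotYet,
    Failed(E),
}

// Simplified stand-in for omicron_test_utils::dev::poll::wait_for_condition.
fn wait_for_condition<T, E: std::fmt::Debug>(
    mut check: impl FnMut() -> Result<T, CondCheckError<E>>,
    poll_interval: Duration,
    poll_max: Duration,
) -> Result<T, String> {
    let start = Instant::now();
    loop {
        match check() {
            Ok(value) => return Ok(value),
            Err(CondCheckError::Failed(e)) => return Err(format!("check failed: {e:?}")),
            Err(CondCheckError::NotYet) if start.elapsed() > poll_max => {
                return Err("timed out".to_string())
            }
            Err(CondCheckError::NotYet) => std::thread::sleep(poll_interval),
        }
    }
}

fn main() {
    let mut attempts = 0;
    let found = wait_for_condition(
        || {
            attempts += 1;
            // Pretend the inventory collection appears on the third poll.
            if attempts < 3 {
                Err(CondCheckError::<()>::NotYet)
            } else {
                Ok(attempts)
            }
        },
        Duration::from_millis(10),
        Duration::from_secs(1),
    )
    .expect("expected to find inventory collection");
    assert_eq!(found, 3);
}
```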
@@ -385,6 +402,7 @@ pub struct SledAgent { pub usable_physical_ram: ByteCount, pub reservoir_size: ByteCount, pub disks: Vec, + pub zpools: Vec, } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 8de8be430f..5c43c40e56 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -5267,6 +5267,12 @@ }, "usable_physical_ram": { "$ref": "#/components/schemas/ByteCount" + }, + "zpools": { + "type": "array", + "items": { + "$ref": "#/components/schemas/InventoryZpool" + } } }, "required": [ @@ -5277,7 +5283,8 @@ "sled_id", "sled_role", "usable_hardware_threads", - "usable_physical_ram" + "usable_physical_ram", + "zpools" ] }, "InventoryDisk": { @@ -5301,6 +5308,23 @@ "variant" ] }, + "InventoryZpool": { + "description": "Identifies information about zpools managed by the control plane", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "total_size": { + "$ref": "#/components/schemas/ByteCount" + } + }, + "required": [ + "id", + "total_size" + ] + }, "IpNet": { "oneOf": [ { diff --git a/schema/crdb/44.0.0/up01.sql b/schema/crdb/44.0.0/up01.sql new file mode 100644 index 0000000000..a13133ee67 --- /dev/null +++ b/schema/crdb/44.0.0/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.zpool DROP COLUMN IF EXISTS total_size; diff --git a/schema/crdb/44.0.0/up02.sql b/schema/crdb/44.0.0/up02.sql new file mode 100644 index 0000000000..601888949b --- /dev/null +++ b/schema/crdb/44.0.0/up02.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS omicron.public.inv_zpool ( + inv_collection_id UUID NOT NULL, + time_collected TIMESTAMPTZ NOT NULL, + + id UUID NOT NULL, + sled_id UUID NOT NULL, + total_size INT NOT NULL, + + PRIMARY KEY (inv_collection_id, sled_id, id) +); diff --git a/schema/crdb/44.0.0/up03.sql b/schema/crdb/44.0.0/up03.sql new file mode 100644 index 0000000000..10195be482 --- /dev/null +++ b/schema/crdb/44.0.0/up03.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS inv_zpool_by_id_and_time ON omicron.public.inv_zpool (id, time_collected DESC); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 255cdc8135..c7a4db09f1 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -462,10 +462,12 @@ CREATE TABLE IF NOT EXISTS omicron.public.virtual_provisioning_resource ( ram_provisioned INT8 NOT NULL ); -/* - * ZPools of Storage, attached to Sleds. - * These are backed by a single physical disk. - */ +-- ZPools of Storage, attached to Sleds. +-- These are backed by a single physical disk. +-- +-- For information about the provisioned zpool, reference the +-- "omicron.public.inv_zpool" table, which returns information +-- that has actually been returned from the underlying sled. 
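The migration above (44.0.0/up01–up03) drops `zpool.total_size` and makes `inv_zpool`, together with the `(id, time_collected DESC)` index, the place to look for observed capacity. A hypothetical ad-hoc query against the post-migration schema that lists each control-plane zpool with its most recently observed size (NULL if the pool has never appeared in a collection):

```sql
SELECT
    z.id,
    z.sled_id,
    (SELECT iz.total_size
       FROM omicron.public.inv_zpool AS iz
      WHERE iz.id = z.id
      ORDER BY iz.time_collected DESC
      LIMIT 1) AS latest_total_size
FROM omicron.public.zpool AS z
WHERE z.time_deleted IS NULL;
```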
CREATE TABLE IF NOT EXISTS omicron.public.zpool ( /* Identity metadata (asset) */ id UUID PRIMARY KEY, @@ -478,9 +480,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.zpool ( sled_id UUID NOT NULL, /* FK into the Physical Disk table */ - physical_disk_id UUID NOT NULL, - - total_size INT NOT NULL + physical_disk_id UUID NOT NULL ); /* Create an index on the physical disk id */ @@ -2986,6 +2986,28 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_physical_disk ( PRIMARY KEY (inv_collection_id, sled_id, slot) ); +CREATE TABLE IF NOT EXISTS omicron.public.inv_zpool ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + inv_collection_id UUID NOT NULL, + -- when this observation was made + time_collected TIMESTAMPTZ NOT NULL, + + -- The control plane ID of the zpool + id UUID NOT NULL, + sled_id UUID NOT NULL, + total_size INT NOT NULL, + + -- PK consisting of: + -- - Which collection this was + -- - The sled reporting the disk + -- - The slot in which this disk was found + PRIMARY KEY (inv_collection_id, sled_id, id) +); + +-- Allow looking up the most recent Zpool by ID +CREATE INDEX IF NOT EXISTS inv_zpool_by_id_and_time ON omicron.public.inv_zpool (id, time_collected DESC); + CREATE TABLE IF NOT EXISTS omicron.public.inv_sled_omicron_zones ( -- where this observation came from -- (foreign key into `inv_collection` table) @@ -3670,7 +3692,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '43.0.0', NULL) + ( TRUE, NOW(), NOW(), '44.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index 82294326b6..f74438a678 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -882,6 +882,13 @@ pub struct InventoryDisk { pub slot: i64, } +/// Identifies information about zpools managed by the control plane +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +pub struct InventoryZpool { + pub id: Uuid, + pub total_size: ByteCount, +} + /// Identity and basic status information about this sled agent #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] pub struct Inventory { @@ -893,6 +900,7 @@ pub struct Inventory { pub usable_physical_ram: ByteCount, pub reservoir_size: ByteCount, pub disks: Vec, + pub zpools: Vec, } #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 425f4e31a4..b772a60347 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -739,6 +739,8 @@ impl SledAgent { } SocketAddr::V6(v6) => v6, }; + + let storage = self.storage.lock().await; Ok(Inventory { sled_id: self.id, sled_agent_address, @@ -753,10 +755,7 @@ impl SledAgent { self.config.hardware.reservoir_ram, ) .context("reservoir_size")?, - disks: self - .storage - .lock() - .await + disks: storage .physical_disks() .iter() .map(|(identity, info)| crate::params::InventoryDisk { @@ -765,6 +764,16 @@ impl SledAgent { slot: info.slot, }) .collect(), + zpools: storage + .zpools() + .iter() + .map(|(id, zpool)| { + Ok(crate::params::InventoryZpool { + id: *id, + total_size: ByteCount::try_from(zpool.total_size())?, + }) + }) + .collect::, anyhow::Error>>()?, }) } diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index d50cd296ae..8fb362c5b7 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -478,13 +478,14 @@ pub(crate) struct PhysicalDisk { pub(crate) slot: i64, } -struct Zpool { 
+pub(crate) struct Zpool { datasets: HashMap, + total_size: u64, } impl Zpool { - fn new() -> Self { - Zpool { datasets: HashMap::new() } + fn new(total_size: u64) -> Self { + Zpool { datasets: HashMap::new(), total_size } } fn insert_dataset( @@ -501,6 +502,10 @@ impl Zpool { .expect("Failed to get the dataset we just inserted") } + pub fn total_size(&self) -> u64 { + self.total_size + } + pub async fn get_dataset_for_region( &self, region_id: Uuid, @@ -614,7 +619,7 @@ impl Storage { size: u64, ) { // Update our local data - self.zpools.insert(zpool_id, Zpool::new()); + self.zpools.insert(zpool_id, Zpool::new(size)); // Notify Nexus let request = ZpoolPutRequest { @@ -629,6 +634,10 @@ impl Storage { .expect("Failed to notify Nexus about new Zpool"); } + /// Returns an immutable reference to all zpools + pub fn zpools(&self) -> &HashMap { + &self.zpools + } /// Adds a Dataset to the sled's simulated storage. pub async fn insert_dataset( &mut self, diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 91ddfde153..713b10428f 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -1107,18 +1107,22 @@ impl SledAgent { } else { crate::params::SledRole::Gimlet }; - let disks = self - .storage() - .get_latest_resources() - .await - .disks() - .iter() - .map(|(identity, (disk, _pool))| crate::params::InventoryDisk { + + let mut disks = vec![]; + let mut zpools = vec![]; + for (identity, (disk, pool)) in + self.storage().get_latest_resources().await.disks().iter() + { + disks.push(crate::params::InventoryDisk { identity: identity.clone(), variant: disk.variant(), slot: disk.slot(), - }) - .collect(); + }); + zpools.push(crate::params::InventoryZpool { + id: pool.name.id(), + total_size: ByteCount::try_from(pool.info.size())?, + }); + } Ok(Inventory { sled_id, @@ -1129,6 +1133,7 @@ impl SledAgent { usable_physical_ram: ByteCount::try_from(usable_physical_ram)?, reservoir_size, disks, + zpools, }) } }
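One more note on the sled-agent side: the `Inventory` builder in `sled_agent.rs` above switches from a single `map`/`collect` over disks to one loop that fills both `disks` and `zpools`, because the zpool size conversion (`ByteCount::try_from(pool.info.size())?`) is fallible. A standalone sketch of that shape, with placeholder types standing in for the real disk and pool structures:

```rust
// Placeholder input: one entry per physical disk / its pool.
struct PoolInfo {
    name: &'static str,
    size_bytes: u128,
}

#[derive(Debug)]
struct SizeError(u128);

// Stand-in for the fallible ByteCount::try_from conversion.
fn byte_count(size: u128) -> Result<u64, SizeError> {
    u64::try_from(size).map_err(|_| SizeError(size))
}

fn inventory(pools: &[PoolInfo]) -> Result<(Vec<&'static str>, Vec<u64>), SizeError> {
    let mut disks = Vec::new();
    let mut zpools = Vec::new();
    for p in pools {
        disks.push(p.name);
        // `?` propagates a conversion failure out of the whole inventory call,
        // mirroring the `ByteCount::try_from(...)?` usage above.
        zpools.push(byte_count(p.size_bytes)?);
    }
    Ok((disks, zpools))
}

fn main() {
    let pools = [PoolInfo { name: "oxp_a", size_bytes: 1 << 30 }];
    let (disks, zpools) = inventory(&pools).expect("sizes should fit");
    assert_eq!(disks, vec!["oxp_a"]);
    assert_eq!(zpools, vec![1u64 << 30]);
}
```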