diff --git a/Cargo.lock b/Cargo.lock index a667cacef7..709aaca3f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7215,6 +7215,8 @@ dependencies = [ "hex", "hex-literal", "http", + "http-body-util", + "http-range", "hyper", "hyper-staticfile", "illumos-utils", @@ -7247,6 +7249,7 @@ dependencies = [ "propolis-mock-server", "propolis_api_types", "rand", + "range-requests", "rcgen", "repo-depot-api", "repo-depot-client", @@ -7286,6 +7289,7 @@ dependencies = [ "uuid", "walkdir", "zeroize", + "zip 2.1.3", "zone 0.3.0", ] @@ -10664,6 +10668,7 @@ version = "0.1.0" dependencies = [ "camino", "dropshot 0.13.0", + "http", "nexus-sled-agent-shared", "omicron-common", "omicron-uuid-kinds", diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index ce1fc87996..75ec158890 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -51,6 +51,7 @@ use omicron_test_utils::dev::poll::wait_for_condition; use omicron_test_utils::dev::poll::CondCheckError; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use oxnet::Ipv4Net; @@ -1171,7 +1172,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { pub async fn add_zpool_with_dataset(&mut self, sled_id: SledUuid) { self.add_zpool_with_dataset_ext( sled_id, - Uuid::new_v4(), + PhysicalDiskUuid::new_v4(), ZpoolUuid::new_v4(), DatasetUuid::new_v4(), Self::DEFAULT_ZPOOL_SIZE_GIB, @@ -1204,7 +1205,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { pub async fn add_zpool_with_dataset_ext( &mut self, sled_id: SledUuid, - physical_disk_id: Uuid, + physical_disk_id: PhysicalDiskUuid, zpool_id: ZpoolUuid, dataset_id: DatasetUuid, gibibytes: u32, @@ -1227,7 +1228,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { let physical_disk_request = nexus_types::internal_api::params::PhysicalDiskPutRequest { - id: physical_disk_id, + id: *physical_disk_id.as_untyped_uuid(), vendor: disk_identity.vendor.clone(), serial: disk_identity.serial.clone(), model: disk_identity.model.clone(), @@ -1239,7 +1240,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { let zpool_request = nexus_types::internal_api::params::ZpoolPutRequest { id: zpool.id.into_untyped_uuid(), - physical_disk_id, + physical_disk_id: *physical_disk_id.as_untyped_uuid(), sled_id: sled_id.into_untyped_uuid(), }; diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index cf063e83dc..be20ea5549 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -736,6 +736,277 @@ } } }, + "/support-bundles/{zpool_id}/{dataset_id}": { + "get": { + "summary": "List all support bundles within a particular dataset", + "operationId": "support_bundle_list", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SupportBundleMetadata", + "type": "array", + "items": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}": { + "get": { + "summary": "Fetch a support bundle from a particular dataset", + "operationId": "support_bundle_get", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleGetQueryParams" + } + } + }, + "required": true + }, + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "post": { + "summary": "Create a support bundle within a particular dataset", + "operationId": "support_bundle_create", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + { + "in": "query", + "name": "hash", + "required": true, + "schema": { + "type": "string", + "format": "hex string (32 bytes)" + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Delete a support bundle from a particular dataset", + "operationId": "support_bundle_delete", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "head": { + "summary": "Fetch a support bundle from a particular dataset", + "operationId": "support_bundle_head", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleGetQueryParams" + } + } + }, + "required": true + }, + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, "/switch-ports": { "post": { "operationId": "uplink_ensure", @@ -5339,6 +5610,95 @@ "format": "uint8", "minimum": 0 }, + "SupportBundleGetQueryParams": { + "description": "Query parameters for reading the support bundle", + "type": "object", + "properties": { + "query_type": { + "$ref": "#/components/schemas/SupportBundleQueryType" + } + }, + "required": [ + "query_type" + ] + }, + "SupportBundleMetadata": { + "description": "Metadata about a support bundle", + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/SupportBundleState" + }, + "support_bundle_id": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + "required": [ + "state", + "support_bundle_id" + ] + }, + "SupportBundleQueryType": { + "description": "Describes the type of access to the support bundle", + "oneOf": [ + { + "description": "Access the whole support bundle", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "whole" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Access the names of all files within the support bundle", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "index" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Access a specific file within the support bundle", + "type": "object", + "properties": { + "file_path": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "path" + ] + } + }, + "required": [ + "file_path", + "type" + ] + } + ] + }, + "SupportBundleState": { + "type": "string", + "enum": [ + "complete", + "incomplete" + ] + }, "SwitchLocation": { "description": "Identifies switch physical location", "oneOf": [ @@ -5469,6 +5829,10 @@ "type": "string", "format": "uuid" }, + "TypedUuidForSupportBundleKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForZpoolKind": { "type": "string", "format": "uuid" diff --git a/range-requests/src/lib.rs b/range-requests/src/lib.rs index ccd250d949..e6e6945e60 100644 --- a/range-requests/src/lib.rs +++ b/range-requests/src/lib.rs @@ -132,6 +132,11 @@ fn make_response_common( pub struct PotentialRange(Vec); impl PotentialRange { + /// Creates a new [PotentialRange] from raw bytes. + pub fn new(bytes: &[u8]) -> Self { + Self(Vec::from(bytes)) + } + /// Parses a single range request out of the range request. 
/// /// `len` is the total length of the document, for the range request being made. diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 442ddef7e1..10b4ba1cdb 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -36,6 +36,9 @@ futures.workspace = true glob.workspace = true hex.workspace = true http.workspace = true +http-body-util.workspace = true +http-range.workspace = true +hyper.workspace = true hyper-staticfile.workspace = true gateway-client.workspace = true illumos-utils.workspace = true @@ -64,6 +67,7 @@ propolis_api_types.workspace = true propolis-client.workspace = true propolis-mock-server.workspace = true # Only used by the simulated sled agent rand = { workspace = true, features = ["getrandom"] } +range-requests.workspace = true repo-depot-api.workspace = true repo-depot-client.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } @@ -91,6 +95,8 @@ tar.workspace = true thiserror.workspace = true tofino.workspace = true tokio = { workspace = true, features = ["full"] } +tokio-stream.workspace = true +tokio-util.workspace = true toml.workspace = true usdt.workspace = true uuid.workspace = true @@ -100,6 +106,7 @@ static_assertions.workspace = true omicron-workspace-hack.workspace = true slog-error-chain.workspace = true walkdir.workspace = true +zip.workspace = true [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/sled-agent/api/Cargo.toml b/sled-agent/api/Cargo.toml index 046f17574b..95e9552f53 100644 --- a/sled-agent/api/Cargo.toml +++ b/sled-agent/api/Cargo.toml @@ -10,6 +10,7 @@ workspace = true [dependencies] camino.workspace = true dropshot.workspace = true +http.workspace = true nexus-sled-agent-shared.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true diff --git a/sled-agent/api/src/lib.rs b/sled-agent/api/src/lib.rs index 50fdcf9139..634640079a 100644 --- a/sled-agent/api/src/lib.rs +++ b/sled-agent/api/src/lib.rs @@ -7,7 +7,7 @@ use std::time::Duration; use camino::Utf8PathBuf; use dropshot::{ - FreeformBody, HttpError, HttpResponseAccepted, HttpResponseCreated, + Body, FreeformBody, HttpError, HttpResponseAccepted, HttpResponseCreated, HttpResponseDeleted, HttpResponseHeaders, HttpResponseOk, HttpResponseUpdatedNoContent, Path, Query, RequestContext, StreamingBody, TypedBody, @@ -29,7 +29,9 @@ use omicron_common::{ }, update::ArtifactHash, }; -use omicron_uuid_kinds::{PropolisUuid, ZpoolUuid}; +use omicron_uuid_kinds::{ + DatasetUuid, PropolisUuid, SupportBundleUuid, ZpoolUuid, +}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_agent_types::{ @@ -157,6 +159,60 @@ pub trait SledAgentApi { rqctx: RequestContext, ) -> Result>, HttpError>; + /// List all support bundles within a particular dataset + #[endpoint { + method = GET, + path = "/support-bundles/{zpool_id}/{dataset_id}" + }] + async fn support_bundle_list( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError>; + + /// Create a support bundle within a particular dataset + #[endpoint { + method = POST, + path = "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}" + }] + async fn support_bundle_create( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + body: StreamingBody, + ) -> Result, HttpError>; + + /// Fetch a support bundle from a particular dataset + #[endpoint { + method = GET, + path = "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}" + }] + async fn support_bundle_get( + rqctx: 
RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError>; + + /// Fetch a support bundle from a particular dataset + #[endpoint { + method = HEAD, + path = "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}" + }] + async fn support_bundle_head( + rqctx: RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError>; + + /// Delete a support bundle from a particular dataset + #[endpoint { + method = DELETE, + path = "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}" + }] + async fn support_bundle_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + #[endpoint { method = PUT, path = "/omicron-zones", @@ -605,6 +661,79 @@ pub struct VmmPathParam { pub propolis_id: PropolisUuid, } +/// Path parameters for Support Bundle requests (sled agent API) +#[derive(Deserialize, JsonSchema)] +pub struct SupportBundleListPathParam { + /// The zpool on which this support bundle was provisioned + pub zpool_id: ZpoolUuid, + + /// The dataset on which this support bundle was provisioned + pub dataset_id: DatasetUuid, +} + +/// Path parameters for Support Bundle requests (sled agent API) +#[derive(Deserialize, JsonSchema)] +pub struct SupportBundlePathParam { + /// The zpool on which this support bundle was provisioned + pub zpool_id: ZpoolUuid, + + /// The dataset on which this support bundle was provisioned + pub dataset_id: DatasetUuid, + + /// The ID of the support bundle itself + pub support_bundle_id: SupportBundleUuid, +} + +/// Path parameters for Support Bundle requests (sled agent API) +#[derive(Deserialize, JsonSchema)] +pub struct SupportBundleFilePathParam { + #[serde(flatten)] + pub parent: SupportBundlePathParam, +} + +/// Metadata about a support bundle +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct SupportBundleCreateQueryParams { + pub hash: ArtifactHash, +} + +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct SupportBundleGetHeaders { + range: String, +} + +/// Query parameters for reading the support bundle +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct SupportBundleGetQueryParams { + pub query_type: SupportBundleQueryType, +} + +/// Describes the type of access to the support bundle +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum SupportBundleQueryType { + /// Access the whole support bundle + Whole, + /// Access the names of all files within the support bundle + Index, + /// Access a specific file within the support bundle + Path { file_path: String }, +} + +#[derive(Deserialize, Debug, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum SupportBundleState { + Complete, + Incomplete, +} + +/// Metadata about a support bundle +#[derive(Debug, Deserialize, Serialize, JsonSchema)] +pub struct SupportBundleMetadata { + pub support_bundle_id: SupportBundleUuid, + pub state: SupportBundleState, +} + /// Path parameters for Disk requests (sled agent API) #[derive(Deserialize, JsonSchema)] pub struct DiskPathParam { diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 50ef3320ed..844e13151a 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -6,7 +6,7 @@ use super::sled_agent::SledAgent; use crate::sled_agent::Error as SledAgentError; -use crate::support_bundle::SupportBundleCommandHttpOutput; +use crate::support_bundle::queries::SupportBundleCommandHttpOutput; use crate::zone_bundle::BundleError; use 
bootstore::schemes::v0::NetworkConfig; use camino::Utf8PathBuf; @@ -33,6 +33,7 @@ use omicron_common::disk::{ DisksManagementResult, M2Slot, OmicronPhysicalDisksConfig, }; use omicron_common::update::ArtifactHash; +use range_requests::RequestContextEx; use sled_agent_api::*; use sled_agent_types::boot_disk::{ BootDiskOsWriteStatus, BootDiskPathParams, BootDiskUpdatePathParams, @@ -225,6 +226,96 @@ impl SledAgentApi for SledAgentImpl { .map_err(HttpError::from) } + async fn support_bundle_list( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError> { + let sa = rqctx.context(); + + let SupportBundleListPathParam { zpool_id, dataset_id } = + path_params.into_inner(); + + let bundles = + sa.as_support_bundle_storage().list(zpool_id, dataset_id).await?; + + Ok(HttpResponseOk(bundles)) + } + + async fn support_bundle_create( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + body: StreamingBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + let SupportBundleCreateQueryParams { hash } = query_params.into_inner(); + + let metadata = sa + .as_support_bundle_storage() + .create( + zpool_id, + dataset_id, + support_bundle_id, + hash, + body.into_stream(), + ) + .await?; + + Ok(HttpResponseCreated(metadata)) + } + + async fn support_bundle_get( + rqctx: RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + let range = rqctx.range(); + let query = body.into_inner().query_type; + Ok(sa + .as_support_bundle_storage() + .get(zpool_id, dataset_id, support_bundle_id, range, query) + .await?) + } + + async fn support_bundle_head( + rqctx: RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + let range = rqctx.range(); + let query = body.into_inner().query_type; + Ok(sa + .as_support_bundle_storage() + .head(zpool_id, dataset_id, support_bundle_id, range, query) + .await?) 
+ } + + async fn support_bundle_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let sa = rqctx.context(); + + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + sa.as_support_bundle_storage() + .delete(zpool_id, dataset_id, support_bundle_id) + .await?; + Ok(HttpResponseDeleted()) + } + async fn datasets_put( rqctx: RequestContext, body: TypedBody, diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 504f4a60dc..2d23f9150b 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -402,6 +402,101 @@ impl SledAgentApi for SledAgentSimImpl { Ok(HttpResponseUpdatedNoContent()) } + async fn support_bundle_list( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError> { + let sa = rqctx.context(); + + let SupportBundleListPathParam { zpool_id, dataset_id } = + path_params.into_inner(); + + let bundles = sa.support_bundle_list(zpool_id, dataset_id).await?; + Ok(HttpResponseOk(bundles)) + } + + async fn support_bundle_create( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + _body: StreamingBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + let SupportBundleCreateQueryParams { hash } = query_params.into_inner(); + + Ok(HttpResponseCreated( + sa.support_bundle_create( + zpool_id, + dataset_id, + support_bundle_id, + hash, + ) + .await?, + )) + } + + async fn support_bundle_get( + rqctx: RequestContext, + path_params: Path, + _body: TypedBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + sa.support_bundle_get(zpool_id, dataset_id, support_bundle_id).await?; + + Ok(http::Response::builder() + .status(http::StatusCode::OK) + .header(http::header::CONTENT_TYPE, "text/html") + .body(dropshot::Body::with_content( + "simulated support bundle; do not eat", + )) + .unwrap()) + } + + async fn support_bundle_head( + rqctx: RequestContext, + path_params: Path, + _body: TypedBody, + ) -> Result, HttpError> { + let sa = rqctx.context(); + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + sa.support_bundle_get(zpool_id, dataset_id, support_bundle_id).await?; + + let fictional_length = 10000; + + Ok(http::Response::builder() + .status(http::StatusCode::OK) + .header(http::header::CONTENT_TYPE, "text/html") + .header(hyper::header::ACCEPT_RANGES, "bytes") + .header(hyper::header::CONTENT_LENGTH, fictional_length) + .body(dropshot::Body::empty()) + .unwrap()) + } + + async fn support_bundle_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let sa = rqctx.context(); + + let SupportBundlePathParam { zpool_id, dataset_id, support_bundle_id } = + path_params.into_inner(); + + sa.support_bundle_delete(zpool_id, dataset_id, support_bundle_id) + .await?; + + Ok(HttpResponseDeleted()) + } + + // --- Unimplemented endpoints --- + async fn set_eip_gateways( rqctx: RequestContext, _body: TypedBody, @@ -411,8 +506,6 @@ impl SledAgentApi for SledAgentSimImpl { Ok(HttpResponseUpdatedNoContent()) } - // --- Unimplemented endpoints --- - async fn zone_bundle_list_all( _rqctx: RequestContext, _query: Query, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index a7cf8bb382..6dc3a12ee1 100644 --- 
a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -45,6 +45,7 @@ use omicron_common::FileKv; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::ZpoolUuid; use oxnet::Ipv6Net; use sled_agent_types::rack_init::RecoverySiloConfig; @@ -179,7 +180,7 @@ impl Server { // Crucible dataset for each. This emulates the setup we expect to have // on the physical rack. for zpool in &config.storage.zpools { - let physical_disk_id = Uuid::new_v4(); + let physical_disk_id = PhysicalDiskUuid::new_v4(); let zpool_id = ZpoolUuid::new_v4(); let vendor = "synthetic-vendor".to_string(); let serial = format!("synthetic-serial-{zpool_id}"); @@ -517,7 +518,7 @@ pub async fn run_standalone_server( for zpool in &zpools { let zpool_id = ZpoolUuid::from_untyped_uuid(zpool.id); for (dataset_id, address) in - server.sled_agent.get_datasets(zpool_id).await + server.sled_agent.get_crucible_datasets(zpool_id).await { datasets.push(NexusTypes::DatasetCreateRequest { zpool_id: zpool.id, diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index a5c094ec21..7b797eb45a 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -38,15 +38,17 @@ use omicron_common::disk::{ DatasetsConfig, DatasetsManagementResult, DiskIdentity, DiskVariant, DisksManagementResult, OmicronPhysicalDisksConfig, }; -use omicron_uuid_kinds::DatasetUuid; -use omicron_uuid_kinds::GenericUuid; -use omicron_uuid_kinds::PropolisUuid; -use omicron_uuid_kinds::SledUuid; -use omicron_uuid_kinds::ZpoolUuid; +use omicron_common::update::ArtifactHash; +use omicron_uuid_kinds::{ + DatasetUuid, GenericUuid, PhysicalDiskUuid, PropolisUuid, SledUuid, + SupportBundleUuid, ZpoolUuid, +}; use oxnet::Ipv6Net; use propolis_client::{ types::VolumeConstructionRequest, Client as PropolisClient, }; +use sled_agent_api::SupportBundleMetadata; +use sled_agent_api::SupportBundleState; use sled_agent_types::disk::DiskStateRequested; use sled_agent_types::early_networking::{ EarlyNetworkConfig, EarlyNetworkConfigBody, @@ -592,7 +594,7 @@ impl SledAgent { /// Adds a Physical Disk to the simulated sled agent. pub async fn create_external_physical_disk( &self, - id: Uuid, + id: PhysicalDiskUuid, identity: DiskIdentity, ) { let variant = DiskVariant::U2; @@ -615,18 +617,18 @@ impl SledAgent { self.storage.lock().await.get_all_zpools() } - pub async fn get_datasets( + pub async fn get_crucible_datasets( &self, zpool_id: ZpoolUuid, ) -> Vec<(DatasetUuid, SocketAddr)> { - self.storage.lock().await.get_all_datasets(zpool_id) + self.storage.lock().await.get_all_crucible_datasets(zpool_id) } /// Adds a Zpool to the simulated sled agent. pub async fn create_zpool( &self, id: ZpoolUuid, - physical_disk_id: Uuid, + physical_disk_id: PhysicalDiskUuid, size: u64, ) { self.storage @@ -636,13 +638,30 @@ impl SledAgent { .await; } + /// Adds a debug dataset within a zpool + pub async fn create_debug_dataset( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) { + self.storage + .lock() + .await + .insert_debug_dataset(zpool_id, dataset_id) + .await + } + /// Adds a Crucible Dataset within a zpool. 
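    /// Returns the address of the simulated Crucible server backing it.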
pub async fn create_crucible_dataset( &self, zpool_id: ZpoolUuid, dataset_id: DatasetUuid, ) -> SocketAddr { - self.storage.lock().await.insert_dataset(zpool_id, dataset_id).await + self.storage + .lock() + .await + .insert_crucible_dataset(zpool_id, dataset_id) + .await } /// Returns a crucible dataset within a particular zpool. @@ -651,7 +670,11 @@ impl SledAgent { zpool_id: ZpoolUuid, dataset_id: DatasetUuid, ) -> Arc { - self.storage.lock().await.get_dataset(zpool_id, dataset_id).await + self.storage + .lock() + .await + .get_crucible_dataset(zpool_id, dataset_id) + .await } /// Issue a snapshot request for a Crucible disk attached to an instance. @@ -896,6 +919,68 @@ impl SledAgent { }) } + pub async fn support_bundle_list( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result, HttpError> { + self.storage + .lock() + .await + .support_bundle_list(zpool_id, dataset_id) + .await + } + + pub async fn support_bundle_create( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + expected_hash: ArtifactHash, + ) -> Result { + self.storage + .lock() + .await + .support_bundle_create( + zpool_id, + dataset_id, + support_bundle_id, + expected_hash, + ) + .await?; + + Ok(SupportBundleMetadata { + support_bundle_id, + state: SupportBundleState::Complete, + }) + } + + pub async fn support_bundle_get( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result<(), HttpError> { + self.storage + .lock() + .await + .support_bundle_exists(zpool_id, dataset_id, support_bundle_id) + .await + } + + pub async fn support_bundle_delete( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result<(), HttpError> { + self.storage + .lock() + .await + .support_bundle_delete(zpool_id, dataset_id, support_bundle_id) + .await + } + pub async fn datasets_ensure( &self, config: DatasetsConfig, diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index 344c3d730c..fe0c4c71a0 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -28,13 +28,18 @@ use omicron_common::disk::DiskManagementStatus; use omicron_common::disk::DiskVariant; use omicron_common::disk::DisksManagementResult; use omicron_common::disk::OmicronPhysicalDisksConfig; +use omicron_common::update::ArtifactHash; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::PropolisUuid; +use omicron_uuid_kinds::SupportBundleUuid; use omicron_uuid_kinds::ZpoolUuid; use propolis_client::types::VolumeConstructionRequest; use serde::Serialize; +use sled_agent_api::SupportBundleMetadata; +use sled_agent_api::SupportBundleState; use slog::Logger; use std::collections::HashMap; use std::collections::HashSet; @@ -809,25 +814,44 @@ impl CrucibleServer { } } +#[derive(Default)] +pub(crate) struct DebugData { + bundles: HashMap, +} + pub(crate) struct PhysicalDisk { pub(crate) identity: DiskIdentity, pub(crate) variant: DiskVariant, pub(crate) slot: i64, } +/// Describes data being simulated within a dataset. 
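/// Either a simulated Crucible region server, or "debug" storage that simply
/// tracks which support bundles have been stored on it.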
+pub(crate) enum DatasetContents { + Crucible(CrucibleServer), + Debug(DebugData), +} + pub(crate) struct Zpool { id: ZpoolUuid, - physical_disk_id: Uuid, + physical_disk_id: PhysicalDiskUuid, total_size: u64, - datasets: HashMap, + datasets: HashMap, } impl Zpool { - fn new(id: ZpoolUuid, physical_disk_id: Uuid, total_size: u64) -> Self { + fn new( + id: ZpoolUuid, + physical_disk_id: PhysicalDiskUuid, + total_size: u64, + ) -> Self { Zpool { id, physical_disk_id, total_size, datasets: HashMap::new() } } - fn insert_dataset( + fn insert_debug_dataset(&mut self, id: DatasetUuid) { + self.datasets.insert(id, DatasetContents::Debug(DebugData::default())); + } + + fn insert_crucible_dataset( &mut self, log: &Logger, id: DatasetUuid, @@ -837,11 +861,21 @@ impl Zpool { ) -> &CrucibleServer { self.datasets.insert( id, - CrucibleServer::new(log, crucible_ip, start_port, end_port), + DatasetContents::Crucible(CrucibleServer::new( + log, + crucible_ip, + start_port, + end_port, + )), ); - self.datasets + let DatasetContents::Crucible(crucible) = self + .datasets .get(&id) .expect("Failed to get the dataset we just inserted") + else { + panic!("Should have just inserted Crucible dataset"); + }; + crucible } pub fn total_size(&self) -> u64 { @@ -853,10 +887,12 @@ impl Zpool { region_id: Uuid, ) -> Option> { for dataset in self.datasets.values() { - for region in &dataset.data().list().await { - let id = Uuid::from_str(®ion.id.0).unwrap(); - if id == region_id { - return Some(dataset.data()); + if let DatasetContents::Crucible(dataset) = dataset { + for region in &dataset.data().list().await { + let id = Uuid::from_str(®ion.id.0).unwrap(); + if id == region_id { + return Some(dataset.data()); + } } } } @@ -868,13 +904,15 @@ impl Zpool { let mut regions = vec![]; for dataset in self.datasets.values() { - for region in &dataset.data().list().await { - if region.state == State::Destroyed { - continue; - } + if let DatasetContents::Crucible(dataset) = dataset { + for region in &dataset.data().list().await { + if region.state == State::Destroyed { + continue; + } - if port == region.port_number { - regions.push(region.clone()); + if port == region.port_number { + regions.push(region.clone()); + } } } } @@ -896,7 +934,7 @@ pub struct Storage { log: Logger, config: Option, dataset_config: Option, - physical_disks: HashMap, + physical_disks: HashMap, next_disk_slot: i64, zpools: HashMap, crucible_ip: IpAddr, @@ -919,7 +957,7 @@ impl Storage { } /// Returns an immutable reference to all (currently known) physical disks - pub fn physical_disks(&self) -> &HashMap { + pub fn physical_disks(&self) -> &HashMap { &self.physical_disks } @@ -1003,7 +1041,7 @@ impl Storage { pub async fn insert_physical_disk( &mut self, - id: Uuid, + id: PhysicalDiskUuid, identity: DiskIdentity, variant: DiskVariant, ) { @@ -1017,7 +1055,7 @@ impl Storage { pub async fn insert_zpool( &mut self, zpool_id: ZpoolUuid, - disk_id: Uuid, + disk_id: PhysicalDiskUuid, size: u64, ) { // Update our local data @@ -1029,8 +1067,143 @@ impl Storage { &self.zpools } - /// Adds a Dataset to the sled's simulated storage. 
- pub async fn insert_dataset( + fn get_debug_dataset( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result<&DebugData, HttpError> { + let Some(zpool) = self.zpools.get(&zpool_id) else { + return Err(HttpError::for_not_found( + None, + format!("zpool does not exist {zpool_id}"), + )); + }; + let Some(dataset) = zpool.datasets.get(&dataset_id) else { + return Err(HttpError::for_not_found( + None, + format!("dataset does not exist {dataset_id}"), + )); + }; + + let DatasetContents::Debug(debug) = dataset else { + return Err(HttpError::for_bad_request( + None, + format!("Not a debug dataset: {zpool_id} / {dataset_id}"), + )); + }; + + Ok(debug) + } + + fn get_debug_dataset_mut( + &mut self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result<&mut DebugData, HttpError> { + let Some(zpool) = self.zpools.get_mut(&zpool_id) else { + return Err(HttpError::for_not_found( + None, + format!("zpool does not exist {zpool_id}"), + )); + }; + let Some(dataset) = zpool.datasets.get_mut(&dataset_id) else { + return Err(HttpError::for_not_found( + None, + format!("dataset does not exist {dataset_id}"), + )); + }; + + let DatasetContents::Debug(debug) = dataset else { + return Err(HttpError::for_bad_request( + None, + format!("Not a debug dataset: {zpool_id} / {dataset_id}"), + )); + }; + + Ok(debug) + } + + pub async fn support_bundle_list( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result, HttpError> { + let debug = self.get_debug_dataset(zpool_id, dataset_id)?; + + Ok(debug + .bundles + .keys() + .map(|id| SupportBundleMetadata { + support_bundle_id: *id, + state: SupportBundleState::Complete, + }) + .collect()) + } + + pub async fn support_bundle_create( + &mut self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + hash: ArtifactHash, + ) -> Result<(), HttpError> { + let debug = self.get_debug_dataset_mut(zpool_id, dataset_id)?; + + // This is for the simulated server, so we totally ignore the "contents" + // of the bundle and just accept that it should exist. + debug.bundles.insert(support_bundle_id, hash); + + Ok(()) + } + + pub async fn support_bundle_exists( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result<(), HttpError> { + let debug = self.get_debug_dataset(zpool_id, dataset_id)?; + + if !debug.bundles.contains_key(&support_bundle_id) { + return Err(HttpError::for_not_found( + None, + format!("Support bundle not found {support_bundle_id}"), + )); + } + Ok(()) + } + + pub async fn support_bundle_delete( + &mut self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result<(), HttpError> { + let debug = self.get_debug_dataset_mut(zpool_id, dataset_id)?; + + if debug.bundles.remove(&support_bundle_id).is_none() { + return Err(HttpError::for_not_found( + None, + format!("Support bundle not found {support_bundle_id}"), + )); + } + Ok(()) + } + + /// Adds a debug dataset to the sled's simulated storage + pub async fn insert_debug_dataset( + &mut self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) { + self.zpools + .get_mut(&zpool_id) + .expect("Zpool does not exist") + .insert_debug_dataset(dataset_id); + } + + /// Adds a Crucible dataset to the sled's simulated storage. 
+ pub async fn insert_crucible_dataset( &mut self, zpool_id: ZpoolUuid, dataset_id: DatasetUuid, @@ -1040,7 +1213,7 @@ impl Storage { .zpools .get_mut(&zpool_id) .expect("Zpool does not exist") - .insert_dataset( + .insert_crucible_dataset( &self.log, dataset_id, self.crucible_ip, @@ -1069,7 +1242,7 @@ impl Storage { }; nexus_client::types::PhysicalDiskPutRequest { - id: *id, + id: *id.as_untyped_uuid(), vendor: disk.identity.vendor.clone(), serial: disk.identity.serial.clone(), model: disk.identity.model.clone(), @@ -1086,12 +1259,12 @@ impl Storage { .map(|pool| nexus_client::types::ZpoolPutRequest { id: pool.id.into_untyped_uuid(), sled_id: self.sled_id, - physical_disk_id: pool.physical_disk_id, + physical_disk_id: *pool.physical_disk_id.as_untyped_uuid(), }) .collect() } - pub fn get_all_datasets( + pub fn get_all_crucible_datasets( &self, zpool_id: ZpoolUuid, ) -> Vec<(DatasetUuid, SocketAddr)> { @@ -1100,7 +1273,12 @@ impl Storage { zpool .datasets .iter() - .map(|(id, server)| (*id, server.address())) + .filter_map(|(id, dataset)| match dataset { + DatasetContents::Crucible(server) => { + Some((*id, server.address())) + } + _ => None, + }) .collect() } @@ -1108,15 +1286,24 @@ impl Storage { &self, zpool_id: ZpoolUuid, dataset_id: DatasetUuid, - ) -> Arc { + ) -> &DatasetContents { self.zpools .get(&zpool_id) .expect("Zpool does not exist") .datasets .get(&dataset_id) .expect("Dataset does not exist") - .data - .clone() + } + + pub async fn get_crucible_dataset( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Arc { + match self.get_dataset(zpool_id, dataset_id).await { + DatasetContents::Crucible(crucible) => crucible.data.clone(), + _ => panic!("{zpool_id} / {dataset_id} is not a crucible dataset"), + } } pub async fn get_dataset_for_region( diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 80c0cdda21..63c646d4d4 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -18,11 +18,15 @@ use crate::nexus::{ use crate::probe_manager::ProbeManager; use crate::services::{self, ServiceManager}; use crate::storage_monitor::StorageMonitorHandle; -use crate::support_bundle::{SupportBundleCmdError, SupportBundleCmdOutput}; +use crate::support_bundle::queries::{ + dladm_info, ipadm_info, zoneadm_info, SupportBundleCmdError, + SupportBundleCmdOutput, +}; +use crate::support_bundle::storage::SupportBundleManager; use crate::updates::{ConfigUpdates, UpdateManager}; use crate::vmm_reservoir::{ReservoirMode, VmmReservoirManager}; +use crate::zone_bundle; use crate::zone_bundle::BundleError; -use crate::{support_bundle, zone_bundle}; use bootstore::schemes::v0 as bootstore; use camino::Utf8PathBuf; use derive_more::From; @@ -159,6 +163,9 @@ pub enum Error { #[error("Failed to deserialize early network config: {0}")] EarlyNetworkDeserialize(serde_json::Error), + #[error("Support bundle error: {0}")] + SupportBundle(String), + #[error("Zone bundle error: {0}")] ZoneBundle(#[from] BundleError), @@ -381,7 +388,7 @@ impl SledAgentInner { #[derive(Clone)] pub struct SledAgent { inner: Arc, - log: Logger, + pub(crate) log: Logger, sprockets: SprocketsConfig, } @@ -691,6 +698,11 @@ impl SledAgent { .unwrap(); // we retry forever, so this can't fail } + /// Accesses the [SupportBundleManager] API. 
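    /// The HTTP entrypoints call this to list, create, fetch, and delete
    /// support bundles on a given zpool/dataset pair.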
+ pub(crate) fn as_support_bundle_storage(&self) -> SupportBundleManager<'_> { + SupportBundleManager::new(&self.log, self.storage()) + } + pub(crate) fn switch_zone_underlay_info( &self, ) -> (Ipv6Addr, Option<&RackNetworkConfig>) { @@ -1336,19 +1348,19 @@ impl SledAgent { pub(crate) async fn support_zoneadm_info( &self, ) -> Result { - support_bundle::zoneadm_info().await + zoneadm_info().await } pub(crate) async fn support_ipadm_info( &self, ) -> Vec> { - support_bundle::ipadm_info().await + ipadm_info().await } pub(crate) async fn support_dladm_info( &self, ) -> Vec> { - support_bundle::dladm_info().await + dladm_info().await } } diff --git a/sled-agent/src/support_bundle/mod.rs b/sled-agent/src/support_bundle/mod.rs new file mode 100644 index 0000000000..314edfaec8 --- /dev/null +++ b/sled-agent/src/support_bundle/mod.rs @@ -0,0 +1,6 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +pub mod queries; +pub mod storage; diff --git a/sled-agent/src/support_bundle.rs b/sled-agent/src/support_bundle/queries.rs similarity index 99% rename from sled-agent/src/support_bundle.rs rename to sled-agent/src/support_bundle/queries.rs index 588af5fdea..2313d9e08d 100644 --- a/sled-agent/src/support_bundle.rs +++ b/sled-agent/src/support_bundle/queries.rs @@ -235,10 +235,9 @@ pub async fn dladm_info( #[cfg(test)] mod test { + use super::*; use std::{process::Command, time::Duration}; - use crate::support_bundle::*; - #[tokio::test] async fn test_long_running_command_is_aborted() { let mut command = Command::new("sleep"); diff --git a/sled-agent/src/support_bundle/storage.rs b/sled-agent/src/support_bundle/storage.rs new file mode 100644 index 0000000000..e51f35e146 --- /dev/null +++ b/sled-agent/src/support_bundle/storage.rs @@ -0,0 +1,1557 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Management of and access to Support Bundles + +use bytes::Bytes; +use camino::Utf8Path; +use dropshot::Body; +use dropshot::HttpError; +use futures::Stream; +use futures::StreamExt; +use omicron_common::api::external::Error as ExternalError; +use omicron_common::disk::CompressionAlgorithm; +use omicron_common::disk::DatasetConfig; +use omicron_common::disk::SharedDatasetConfig; +use omicron_common::update::ArtifactHash; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::SupportBundleUuid; +use omicron_uuid_kinds::ZpoolUuid; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use range_requests::PotentialRange; +use range_requests::SingleRange; +use sha2::{Digest, Sha256}; +use sled_agent_api::*; +use sled_storage::manager::NestedDatasetConfig; +use sled_storage::manager::NestedDatasetListOptions; +use sled_storage::manager::NestedDatasetLocation; +use sled_storage::manager::StorageHandle; +use slog::Logger; +use slog_error_chain::InlineErrorChain; +use std::io::Write; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncSeekExt; +use tokio::io::AsyncWriteExt; +use tokio_util::io::ReaderStream; +use zip::result::ZipError; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + HttpError(#[from] HttpError), + + #[error("Hash mismatch accessing bundle")] + HashMismatch, + + #[error("Not a file")] + NotAFile, + + #[error("Dataset not found")] + DatasetNotFound, + + #[error("Dataset exists, but has an invalid configuration: (wanted {wanted}, saw {actual})")] + DatasetExistsBadConfig { wanted: DatasetUuid, actual: DatasetUuid }, + + #[error("Dataset exists, but appears on the wrong zpool (wanted {wanted}, saw {actual})")] + DatasetExistsOnWrongZpool { wanted: ZpoolUuid, actual: ZpoolUuid }, + + #[error(transparent)] + Storage(#[from] sled_storage::error::Error), + + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Range(#[from] range_requests::Error), + + #[error(transparent)] + Zip(#[from] ZipError), +} + +fn err_str(err: &dyn std::error::Error) -> String { + InlineErrorChain::new(err).to_string() +} + +impl From for HttpError { + fn from(err: Error) -> Self { + match err { + Error::HttpError(err) => err, + Error::HashMismatch => { + HttpError::for_bad_request(None, "Hash mismatch".to_string()) + } + Error::DatasetNotFound => { + HttpError::for_not_found(None, "Dataset not found".to_string()) + } + Error::NotAFile => { + HttpError::for_bad_request(None, "Not a file".to_string()) + } + Error::Storage(err) => HttpError::from(ExternalError::from(err)), + Error::Zip(err) => match err { + ZipError::FileNotFound => HttpError::for_not_found( + None, + "Entry not found".to_string(), + ), + err => HttpError::for_internal_error(err_str(&err)), + }, + err => HttpError::for_internal_error(err_str(&err)), + } + } +} + +// Implements "seeking" and "putting a capacity on a file" manually. +// +// TODO: When https://github.com/zip-rs/zip2/issues/231 is resolved, +// this method should be replaced by calling "seek" directly, +// via the "by_name_seek" method from the zip crate. 
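// Illustrative example: skip_and_limit(reader, 100, 50) discards the first
// 100 bytes of `reader` and then yields a reader capped at the next 50 bytes,
// i.e. the byte range 100-149 of the underlying entry.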
+fn skip_and_limit( + mut reader: impl std::io::Read, + skip: usize, + limit: usize, +) -> std::io::Result { + const BUF_SIZE: usize = 4096; + let mut buf = vec![0; BUF_SIZE]; + let mut skip_left = skip; + + while skip_left > 0 { + let to_read = std::cmp::min(skip_left, BUF_SIZE); + reader.read_exact(&mut buf[0..to_read])?; + skip_left -= to_read; + } + + Ok(reader.take(limit as u64)) +} + +fn stream_zip_entry_helper( + tx: &tokio::sync::mpsc::Sender, HttpError>>, + mut archive: zip::ZipArchive, + entry_path: String, + range: Option, +) -> Result<(), Error> { + // TODO: When https://github.com/zip-rs/zip2/issues/231 is resolved, + // this should call "by_name_seek" instead. + let mut reader = archive.by_name(&entry_path)?; + + let reader: &mut dyn std::io::Read = match range { + Some(range) => &mut skip_and_limit( + reader, + range.start() as usize, + range.content_length().get() as usize, + )?, + None => &mut reader, + }; + + loop { + let mut buf = vec![0; 4096]; + let n = reader.read(&mut buf)?; + if n == 0 { + return Ok(()); + } + buf.truncate(n); + if let Err(_) = tx.blocking_send(Ok(buf)) { + // If we cannot send anything, just bail out - we also won't be able + // to send an appropriate error in this case, since we'd also be + // sending it on this borked channel + return Ok(()); + } + } +} + +struct ZipEntryStream { + stream: tokio_stream::wrappers::ReceiverStream, HttpError>>, + range: Option, + size: u64, +} + +// Possible responses from the success case of `stream_zip_entry` +enum ZipStreamOutput { + // Returns the zip entry, as a byte stream + Stream(ZipEntryStream), + // Returns an HTTP response indicating the accepted ranges + RangeResponse(http::Response), +} + +// Returns a stream of bytes representing an entry within a zipfile. +// +// Q: Why does this spawn a task? +// A: Two reasons - first, the "zip" crate is synchronous, and secondly, +// it has strong opinions about the "archive" living as long as the "entry +// reader". Without a task, streaming an entry from the archive would require +// a self-referential struct, as described in: +// https://morestina.net/blog/1868/self-referential-types-for-fun-and-profit +fn stream_zip_entry( + file: std::fs::File, + entry_path: String, + pr: Option, +) -> Result { + let mut archive = zip::ZipArchive::new(file)?; + let size = { + // This is a little redundant -- we look up the same file entry within the + // helper function we spawn below -- but the ZipFile is !Send and !Sync, so + // we can't re-use it in the background task. + let zipfile = archive.by_name(&entry_path)?; + if !zipfile.is_file() { + return Err(Error::NotAFile); + } + + zipfile.size() + }; + + let range = if let Some(range) = pr { + let range = match range.parse(size) { + Ok(range) => range, + Err(err) => return Ok(ZipStreamOutput::RangeResponse(err)), + }; + Some(range) + } else { + None + }; + + let (tx, rx) = tokio::sync::mpsc::channel(16); + let r = range.clone(); + tokio::task::spawn_blocking(move || { + if let Err(err) = stream_zip_entry_helper(&tx, archive, entry_path, r) { + let _ = tx.blocking_send(Err(err.into())); + } + }); + + Ok(ZipStreamOutput::Stream(ZipEntryStream { + stream: tokio_stream::wrappers::ReceiverStream::new(rx), + range, + size, + })) +} + +/// APIs to manage support bundle storage. +pub struct SupportBundleManager<'a> { + log: &'a Logger, + storage: &'a StorageHandle, +} + +impl<'a> SupportBundleManager<'a> { + /// Creates a new SupportBundleManager, which provides access + /// to support bundle CRUD APIs. 
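    /// (In this change it is constructed per request via
    /// `SledAgent::as_support_bundle_storage`, which passes the agent's
    /// logger and `StorageHandle`.)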
+ pub fn new( + log: &'a Logger, + storage: &'a StorageHandle, + ) -> SupportBundleManager<'a> { + Self { log, storage } + } + + // Returns a dataset that the sled has been explicitly configured to use. + async fn get_configured_dataset( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result { + let datasets_config = self.storage.datasets_config_list().await?; + let dataset = datasets_config + .datasets + .get(&dataset_id) + .ok_or_else(|| Error::DatasetNotFound)?; + + if dataset.id != dataset_id { + return Err(Error::DatasetExistsBadConfig { + wanted: dataset_id, + actual: dataset.id, + }); + } + let actual = dataset.name.pool().id(); + if actual != zpool_id { + return Err(Error::DatasetExistsOnWrongZpool { + wanted: zpool_id, + actual, + }); + } + Ok(dataset.clone()) + } + + /// Lists all support bundles on a particular dataset. + pub async fn list( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + ) -> Result, Error> { + let root = + self.get_configured_dataset(zpool_id, dataset_id).await?.name; + let dataset_location = + NestedDatasetLocation { path: String::from(""), root }; + let datasets = self + .storage + .nested_dataset_list( + dataset_location, + NestedDatasetListOptions::ChildrenOnly, + ) + .await?; + + let mut bundles = Vec::with_capacity(datasets.len()); + for dataset in datasets { + // We should be able to parse each dataset name as a support bundle UUID + let Ok(support_bundle_id) = + dataset.name.path.parse::() + else { + warn!(self.log, "Dataset path not a UUID"; "path" => dataset.name.path); + continue; + }; + + // The dataset for a support bundle exists. + let support_bundle_path = dataset + .name + .mountpoint(illumos_utils::zpool::ZPOOL_MOUNTPOINT_ROOT.into()) + .join("bundle"); + + // Identify whether or not the final "bundle" file exists. + // + // This is a signal that the support bundle has been fully written. + let state = if tokio::fs::try_exists(&support_bundle_path).await? { + SupportBundleState::Complete + } else { + SupportBundleState::Incomplete + }; + + let bundle = SupportBundleMetadata { support_bundle_id, state }; + bundles.push(bundle); + } + + Ok(bundles) + } + + /// Validates that the sha2 checksum of the file at `path` matches the + /// expected value. + async fn sha2_checksum_matches( + path: &Utf8Path, + expected: &ArtifactHash, + ) -> Result { + let mut buf = vec![0u8; 65536]; + let mut file = tokio::fs::File::open(path).await?; + let mut ctx = sha2::Sha256::new(); + loop { + let n = file.read(&mut buf).await?; + if n == 0 { + break; + } + ctx.write_all(&buf[0..n])?; + } + + let digest = ctx.finalize(); + return Ok(digest.as_slice() == expected.as_ref()); + } + + // A helper function which streams the contents of a bundle to a file. + // + // If at any point this function fails, the temporary file still exists, + // and should be removed. 
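    // (The caller, `create`, performs that cleanup: it unlinks the temporary
    // file whenever this helper returns an error.)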
+ async fn write_and_finalize_bundle( + mut tmp_file: tokio::fs::File, + from: &Utf8Path, + to: &Utf8Path, + expected_hash: ArtifactHash, + stream: impl Stream>, + ) -> Result<(), Error> { + futures::pin_mut!(stream); + + // Write the body to the file + let mut hasher = Sha256::new(); + while let Some(chunk) = stream.next().await { + let chunk = chunk?; + hasher.update(&chunk); + tmp_file.write_all(&chunk).await?; + } + let digest = hasher.finalize(); + if digest.as_slice() != expected_hash.as_ref() { + return Err(Error::HashMismatch); + } + + // Rename the file to indicate it's ready + tokio::fs::rename(from, to).await?; + Ok(()) + } + + /// Creates a new support bundle on a dataset. + pub async fn create( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + expected_hash: ArtifactHash, + stream: impl Stream>, + ) -> Result { + let log = self.log.new(o!( + "operation" => "support_bundle_create", + "zpool_id" => zpool_id.to_string(), + "dataset_id" => dataset_id.to_string(), + "bundle_id" => support_bundle_id.to_string(), + )); + let root = + self.get_configured_dataset(zpool_id, dataset_id).await?.name; + let dataset = + NestedDatasetLocation { path: support_bundle_id.to_string(), root }; + // The mounted root of the support bundle dataset + let support_bundle_dir = dataset + .mountpoint(illumos_utils::zpool::ZPOOL_MOUNTPOINT_ROOT.into()); + let support_bundle_path = support_bundle_dir.join("bundle"); + let support_bundle_path_tmp = support_bundle_dir.join(format!( + "bundle-{}.tmp", + thread_rng() + .sample_iter(Alphanumeric) + .take(6) + .map(char::from) + .collect::() + )); + + // Ensure that the dataset exists. + info!(log, "Ensuring dataset exists for bundle"); + self.storage + .nested_dataset_ensure(NestedDatasetConfig { + name: dataset, + inner: SharedDatasetConfig { + compression: CompressionAlgorithm::On, + quota: None, + reservation: None, + }, + }) + .await?; + + // Exit early if the support bundle already exists + if tokio::fs::try_exists(&support_bundle_path).await? { + if !Self::sha2_checksum_matches( + &support_bundle_path, + &expected_hash, + ) + .await? + { + warn!(log, "Support bundle exists, but the hash doesn't match"); + return Err(Error::HashMismatch); + } + + info!(log, "Support bundle already exists"); + let metadata = SupportBundleMetadata { + support_bundle_id, + state: SupportBundleState::Complete, + }; + return Ok(metadata); + } + + // Stream the file into the dataset, first as a temporary file, + // and then renaming to the final location. + info!(log, "Streaming bundle to storage"); + let tmp_file = + tokio::fs::File::create(&support_bundle_path_tmp).await?; + if let Err(err) = Self::write_and_finalize_bundle( + tmp_file, + &support_bundle_path_tmp, + &support_bundle_path, + expected_hash, + stream, + ) + .await + { + warn!(log, "Failed to write bundle to storage"; "error" => ?err); + if let Err(unlink_err) = + tokio::fs::remove_file(support_bundle_path_tmp).await + { + warn!(log, "Failed to unlink bundle after previous error"; "error" => ?unlink_err); + } + return Err(err); + } + + info!(log, "Bundle written successfully"); + let metadata = SupportBundleMetadata { + support_bundle_id, + state: SupportBundleState::Complete, + }; + Ok(metadata) + } + + /// Destroys a support bundle that exists on a dataset. 
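    /// Implemented by destroying the bundle's nested dataset, which removes
    /// the bundle file stored within it.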
+ pub async fn delete( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result<(), Error> { + let log = self.log.new(o!( + "operation" => "support_bundle_delete", + "zpool_id" => zpool_id.to_string(), + "dataset_id" => dataset_id.to_string(), + "bundle_id" => support_bundle_id.to_string(), + )); + info!(log, "Destroying support bundle"); + let root = + self.get_configured_dataset(zpool_id, dataset_id).await?.name; + self.storage + .nested_dataset_destroy(NestedDatasetLocation { + path: support_bundle_id.to_string(), + root, + }) + .await?; + + Ok(()) + } + + async fn support_bundle_get_file( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + ) -> Result { + let root = + self.get_configured_dataset(zpool_id, dataset_id).await?.name; + let dataset = + NestedDatasetLocation { path: support_bundle_id.to_string(), root }; + // The mounted root of the support bundle dataset + let support_bundle_dir = dataset + .mountpoint(illumos_utils::zpool::ZPOOL_MOUNTPOINT_ROOT.into()); + let path = support_bundle_dir.join("bundle"); + + let f = tokio::fs::File::open(&path).await?; + Ok(f) + } + + /// Streams a support bundle (or portion of a support bundle) from a + /// dataset. + pub async fn get( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + range: Option, + query: SupportBundleQueryType, + ) -> Result, Error> { + self.get_inner( + zpool_id, + dataset_id, + support_bundle_id, + range, + query, + false, + ) + .await + } + + /// Returns metadata about a support bundle. + pub async fn head( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + range: Option, + query: SupportBundleQueryType, + ) -> Result, Error> { + self.get_inner( + zpool_id, + dataset_id, + support_bundle_id, + range, + query, + true, + ) + .await + } + + async fn get_inner( + &self, + zpool_id: ZpoolUuid, + dataset_id: DatasetUuid, + support_bundle_id: SupportBundleUuid, + range: Option, + query: SupportBundleQueryType, + head_only: bool, + ) -> Result, Error> { + // Regardless of the type of query, we first need to access the entire + // bundle as a file. + let mut file = self + .support_bundle_get_file(zpool_id, dataset_id, support_bundle_id) + .await?; + + match query { + SupportBundleQueryType::Whole => { + let len = file.metadata().await?.len(); + const CONTENT_TYPE: http::HeaderValue = + http::HeaderValue::from_static("application/zip"); + let content_type = Some(CONTENT_TYPE); + + if head_only { + return Ok(range_requests::make_head_response( + None, + len, + content_type, + )?); + } + + if let Some(range) = range { + // If this has a range request, we need to validate the range + // and put bounds on the part of the file we're reading. 
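                    // Illustrative example: a request for bytes 0-1023 of the
                    // bundle parses into a SingleRange, the file is seeked to
                    // offset 0, and exactly 1024 bytes are streamed back.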
+                    let range = match range.parse(len) {
+                        Ok(range) => range,
+                        Err(err_response) => return Ok(err_response),
+                    };
+
+                    info!(
+                        &self.log,
+                        "SupportBundle GET whole file (ranged)";
+                        "bundle_id" => %support_bundle_id,
+                        "start" => range.start(),
+                        "limit" => range.content_length().get(),
+                    );
+
+                    file.seek(std::io::SeekFrom::Start(range.start())).await?;
+                    let limit = range.content_length().get();
+                    return Ok(range_requests::make_get_response(
+                        Some(range),
+                        len,
+                        content_type,
+                        ReaderStream::new(file.take(limit)),
+                    )?);
+                } else {
+                    return Ok(range_requests::make_get_response(
+                        None,
+                        len,
+                        content_type,
+                        ReaderStream::new(file),
+                    )?);
+                };
+            }
+            SupportBundleQueryType::Index => {
+                let file_std = file.into_std().await;
+                let archive = zip::ZipArchive::new(file_std)?;
+                let names: Vec<&str> = archive.file_names().collect();
+                let all_names = names.join("\n");
+                let all_names_bytes = all_names.as_bytes();
+                let len = all_names_bytes.len() as u64;
+                const CONTENT_TYPE: http::HeaderValue =
+                    http::HeaderValue::from_static("text/plain");
+                let content_type = Some(CONTENT_TYPE);
+
+                if head_only {
+                    return Ok(range_requests::make_head_response(
+                        None,
+                        len,
+                        content_type,
+                    )?);
+                }
+
+                let (range, bytes) = if let Some(range) = range {
+                    let range = match range.parse(len) {
+                        Ok(range) => range,
+                        Err(err_response) => return Ok(err_response),
+                    };
+
+                    let section = &all_names_bytes[range.start() as usize
+                        ..=range.end_inclusive() as usize];
+                    (Some(range), section.to_owned())
+                } else {
+                    (None, all_names_bytes.to_owned())
+                };
+
+                let stream = futures::stream::once(async {
+                    Ok::<_, std::convert::Infallible>(bytes)
+                });
+                return Ok(range_requests::make_get_response(
+                    range,
+                    len,
+                    content_type,
+                    stream,
+                )?);
+            }
+            SupportBundleQueryType::Path { file_path } => {
+                let file_std = file.into_std().await;
+
+                let entry_stream =
+                    match stream_zip_entry(file_std, file_path, range)? {
+                        // We have a valid stream
+                        ZipStreamOutput::Stream(entry_stream) => entry_stream,
+                        // The entry exists, but the requested range is invalid --
+                        // send it back as an http body.
+                        ZipStreamOutput::RangeResponse(response) => {
+                            return Ok(response)
+                        }
+                    };
+
+                if head_only {
+                    return Ok(range_requests::make_head_response(
+                        None,
+                        entry_stream.size,
+                        None::<http::HeaderValue>,
+                    )?);
+                }
+
+                return Ok(range_requests::make_get_response(
+                    entry_stream.range,
+                    entry_stream.size,
+                    None::<http::HeaderValue>,
+                    entry_stream.stream,
+                )?);
+            }
+        };
+    }
+}
+
+#[cfg(all(test, target_os = "illumos"))]
+mod tests {
+    use super::*;
+
+    use futures::stream;
+    use http::status::StatusCode;
+    use hyper::header::{
+        ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE,
+    };
+    use omicron_common::disk::DatasetConfig;
+    use omicron_common::disk::DatasetKind;
+    use omicron_common::disk::DatasetName;
+    use omicron_common::disk::DatasetsConfig;
+    use omicron_common::zpool_name::ZpoolName;
+    use omicron_test_utils::dev::test_setup_log;
+    use sled_storage::manager_test_harness::StorageManagerTestHarness;
+    use std::collections::BTreeMap;
+    use std::io::Write;
+    use zip::write::SimpleFileOptions;
+    use zip::ZipWriter;
+
+    struct SingleU2StorageHarness {
+        storage_test_harness: StorageManagerTestHarness,
+        zpool_id: ZpoolUuid,
+    }
+
+    impl SingleU2StorageHarness {
+        async fn new(log: &Logger) -> Self {
+            let mut harness = StorageManagerTestHarness::new(log).await;
+            harness.handle().key_manager_ready().await;
+            let _raw_internal_disks =
+                harness.add_vdevs(&["m2_left.vdev", "m2_right.vdev"]).await;
+
+            let raw_disks = harness.add_vdevs(&["u2_0.vdev"]).await;
+
+            let config = harness.make_config(1, &raw_disks);
+            let result = harness
+                .handle()
+                .omicron_physical_disks_ensure(config.clone())
+                .await
+                .expect("Failed to ensure disks");
+            assert!(!result.has_error(), "{result:?}");
+
+            let zpool_id = config.disks[0].pool_id;
+            Self { storage_test_harness: harness, zpool_id }
+        }
+
+        async fn configure_dataset(
+            &self,
+            dataset_id: DatasetUuid,
+            kind: DatasetKind,
+        ) {
+            let result = self
+                .storage_test_harness
+                .handle()
+                .datasets_ensure(DatasetsConfig {
+                    datasets: BTreeMap::from([(
+                        dataset_id,
+                        DatasetConfig {
+                            id: dataset_id,
+                            name: DatasetName::new(
+                                ZpoolName::new_external(self.zpool_id),
+                                kind,
+                            ),
+                            inner: Default::default(),
+                        },
+                    )]),
+                    ..Default::default()
+                })
+                .await
+                .expect("Failed to ensure datasets");
+            assert!(!result.has_error(), "{result:?}");
+        }
+
+        async fn cleanup(mut self) {
+            self.storage_test_harness.cleanup().await
+        }
+    }
+
+    enum Data {
+        File(&'static [u8]),
+        Directory,
+    }
+    type NamedFile = (&'static str, Data);
+
+    const GREET_PATH: &'static str = "greeting.txt";
+    const GREET_DATA: &'static [u8] = b"Hello around the world!";
+    const ARBITRARY_DIRECTORY: &'static str = "look-a-directory/";
+
+    fn example_files() -> [NamedFile; 6] {
+        [
+            (ARBITRARY_DIRECTORY, Data::Directory),
+            (GREET_PATH, Data::File(GREET_DATA)),
+            ("english/", Data::Directory),
+            ("english/hello.txt", Data::File(b"Hello world!")),
+            ("spanish/", Data::Directory),
+            ("spanish/hello.txt", Data::File(b"Hola mundo!")),
+        ]
+    }
+
+    fn example_zipfile() -> Vec<u8> {
+        let mut buf = vec![0u8; 65536];
+        let len = {
+            let mut zip = ZipWriter::new(std::io::Cursor::new(&mut buf[..]));
+            let options = SimpleFileOptions::default()
+                .compression_method(zip::CompressionMethod::Stored);
+
+            for (name, data) in example_files() {
+                match data {
+                    Data::File(data) => {
+                        zip.start_file(name, options).unwrap();
+                        zip.write_all(data).unwrap();
+                    }
+                    Data::Directory => {
+                        zip.add_directory(name, options).unwrap();
+                    }
+                }
+            }
+            zip.finish().unwrap().position()
+        };
+        buf.truncate(len as usize);
+        buf
+    }
+
+    async fn read_body(response: &mut http::Response<Body>) -> Vec<u8> {
+        use http_body_util::BodyExt;
+        let mut data = vec![];
+        while let Some(frame) = response.body_mut().frame().await {
+            data.append(&mut frame.unwrap().into_data().unwrap().to_vec());
+        }
+        data
+    }
+
+    #[tokio::test]
+    async fn basic_crud() {
+        let logctx = test_setup_log("basic_crud");
+        let log = &logctx.log;
+
+        // Set up storage
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // For this test, we'll add a dataset that can contain our bundles.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Create a fake support bundle -- really, just a zipfile.
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Create a new bundle
+        let bundle = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect("Should have created support bundle");
+        assert_eq!(bundle.support_bundle_id, support_bundle_id);
+        assert_eq!(bundle.state, SupportBundleState::Complete);
+
+        // List the bundle we just created
+        let bundles = mgr
+            .list(harness.zpool_id, dataset_id)
+            .await
+            .expect("Should have been able to read bundles");
+        assert_eq!(bundles.len(), 1);
+        assert_eq!(bundles[0].support_bundle_id, support_bundle_id);
+
+        // HEAD the bundle we created - we can see it's a zipfile with the
+        // expected length, even without reading anything.
+        let mut response = mgr
+            .head(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Whole,
+            )
+            .await
+            .expect("Should have been able to HEAD bundle");
+        assert_eq!(read_body(&mut response).await, Vec::<u8>::new());
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(
+            response.headers()[CONTENT_LENGTH],
+            zipfile_data.len().to_string()
+        );
+        assert_eq!(response.headers()[CONTENT_TYPE], "application/zip");
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // GET the bundle we created, and observe the contents of the bundle
+        let mut response = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Whole,
+            )
+            .await
+            .expect("Should have been able to GET bundle");
+        assert_eq!(read_body(&mut response).await, zipfile_data);
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(
+            response.headers()[CONTENT_LENGTH],
+            zipfile_data.len().to_string()
+        );
+        assert_eq!(response.headers()[CONTENT_TYPE], "application/zip");
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // HEAD the index of the bundle - it should report the size of all
+        // files.
+        let mut response = mgr
+            .head(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Index,
+            )
+            .await
+            .expect("Should have been able to HEAD bundle index");
+        assert_eq!(read_body(&mut response).await, Vec::<u8>::new());
+        let expected_index = example_files()
+            .into_iter()
+            .map(|(name, _)| name)
+            .collect::<Vec<_>>()
+            .join("\n");
+        let expected_len = expected_index.len().to_string();
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(response.headers()[CONTENT_LENGTH], expected_len);
+        assert_eq!(response.headers()[CONTENT_TYPE], "text/plain");
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // GET the index of the bundle.
+        let mut response = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Index,
+            )
+            .await
+            .expect("Should have been able to GET bundle index");
+        assert_eq!(read_body(&mut response).await, expected_index.as_bytes());
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(response.headers()[CONTENT_LENGTH], expected_len);
+        assert_eq!(response.headers()[CONTENT_TYPE], "text/plain");
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // HEAD a single file from within the bundle.
+        let mut response = mgr
+            .head(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Path {
+                    file_path: GREET_PATH.to_string(),
+                },
+            )
+            .await
+            .expect("Should have been able to HEAD single file");
+        assert_eq!(read_body(&mut response).await, Vec::<u8>::new());
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(
+            response.headers()[CONTENT_LENGTH],
+            GREET_DATA.len().to_string()
+        );
+        assert_eq!(
+            response.headers()[CONTENT_TYPE],
+            "application/octet-stream"
+        );
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // GET a single file within the bundle
+        let mut response = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Path {
+                    file_path: GREET_PATH.to_string(),
+                },
+            )
+            .await
+            .expect("Should have been able to GET single file");
+        assert_eq!(read_body(&mut response).await, GREET_DATA);
+        assert_eq!(response.headers().len(), 3);
+        assert_eq!(
+            response.headers()[CONTENT_LENGTH],
+            GREET_DATA.len().to_string()
+        );
+        assert_eq!(
+            response.headers()[CONTENT_TYPE],
+            "application/octet-stream"
+        );
+        assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+
+        // Cannot GET nor HEAD a directory
+        let err = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Path {
+                    file_path: ARBITRARY_DIRECTORY.to_string(),
+                },
+            )
+            .await
+            .expect_err("Should not be able to GET directory");
+        assert!(matches!(err, Error::NotAFile), "Unexpected error: {err:?}");
+
+        let err = mgr
+            .head(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Path {
+                    file_path: ARBITRARY_DIRECTORY.to_string(),
+                },
+            )
+            .await
+            .expect_err("Should not be able to HEAD directory");
+        assert!(matches!(err, Error::NotAFile), "Unexpected error: {err:?}");
+
+        // DELETE the bundle on the dataset
+        mgr.delete(harness.zpool_id, dataset_id, support_bundle_id)
+            .await
+            .expect("Should have been able to DELETE bundle");
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn creation_without_dataset() {
+        let logctx = test_setup_log("creation_without_dataset");
+        let log = &logctx.log;
+
+        // Set up storage (zpool, but not dataset!)
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Get a support bundle that we're ready to store.
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Storing a bundle without a dataset should throw an error.
+        let dataset_id = DatasetUuid::new_v4();
+        let err = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect_err("Bundle creation should fail without dataset");
+        assert!(matches!(err, Error::Storage(_)), "Unexpected error: {err:?}");
+        assert_eq!(HttpError::from(err).status_code, StatusCode::NOT_FOUND);
+
+        // Configure the dataset now, so it'll exist for future requests.
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        mgr.create(
+            harness.zpool_id,
+            dataset_id,
+            support_bundle_id,
+            hash,
+            stream::once(async {
+                Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+            }),
+        )
+        .await
+        .expect("Should have created support bundle");
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn creation_bad_hash() {
+        let logctx = test_setup_log("creation_bad_hash");
+        let log = &logctx.log;
+
+        // Set up storage (zpool, but not dataset!)
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Get a support bundle that we're ready to store.
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Configure the dataset now, so it'll exist for future requests.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        let bad_hash = ArtifactHash(
+            Sha256::digest(b"Hey, this ain't right")
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Creating the bundle with a bad hash should fail.
+        let err = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                bad_hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect_err("Bundle creation should fail with bad hash");
+        assert!(
+            matches!(err, Error::HashMismatch),
+            "Unexpected error: {err:?}"
+        );
+        assert_eq!(HttpError::from(err).status_code, StatusCode::BAD_REQUEST);
+
+        // As long as the dataset exists, we'll make storage for it, which means
+        // the bundle will be visible, but incomplete.
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 1);
+        assert_eq!(bundles[0].support_bundle_id, support_bundle_id);
+        assert_eq!(bundles[0].state, SupportBundleState::Incomplete);
+
+        // Creating the bundle with bad data should fail
+        let err = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::from_static(b"Not a zipfile"))
+                }),
+            )
+            .await
+            .expect_err("Bundle creation should fail with bad hash");
+        assert!(
+            matches!(err, Error::HashMismatch),
+            "Unexpected error: {err:?}"
+        );
+        assert_eq!(HttpError::from(err).status_code, StatusCode::BAD_REQUEST);
+
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 1);
+        assert_eq!(bundles[0].support_bundle_id, support_bundle_id);
+        assert_eq!(bundles[0].state, SupportBundleState::Incomplete);
+
+        // Good hash + Good data -> creation should succeed
+        mgr.create(
+            harness.zpool_id,
+            dataset_id,
+            support_bundle_id,
+            hash,
+            stream::once(async {
+                Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+            }),
+        )
+        .await
+        .expect("Should have created support bundle");
+
+        // The bundle should now appear "Complete"
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 1);
+        assert_eq!(bundles[0].support_bundle_id, support_bundle_id);
+        assert_eq!(bundles[0].state, SupportBundleState::Complete);
+
+        // We can delete the bundle, and it should no longer appear.
+        mgr.delete(harness.zpool_id, dataset_id, support_bundle_id)
+            .await
+            .expect("Should have been able to DELETE bundle");
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 0);
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn creation_bad_hash_still_deleteable() {
+        let logctx = test_setup_log("creation_bad_hash_still_deleteable");
+        let log = &logctx.log;
+
+        // Set up storage (zpool, but not dataset!)
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Get a support bundle that we're ready to store
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+
+        // Configure the dataset now, so it'll exist for future requests.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        let bad_hash = ArtifactHash(
+            Sha256::digest(b"Hey, this ain't right")
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Creating the bundle with a bad hash should fail.
+        let err = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                bad_hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect_err("Bundle creation should fail with bad hash");
+        assert!(
+            matches!(err, Error::HashMismatch),
+            "Unexpected error: {err:?}"
+        );
+        assert_eq!(HttpError::from(err).status_code, StatusCode::BAD_REQUEST);
+
+        // The bundle still appears to exist, as storage gets allocated after
+        // the "create" call.
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 1);
+        assert_eq!(bundles[0].support_bundle_id, support_bundle_id);
+        assert_eq!(bundles[0].state, SupportBundleState::Incomplete);
+
+        // We can delete the bundle, and it should no longer appear.
+        mgr.delete(harness.zpool_id, dataset_id, support_bundle_id)
+            .await
+            .expect("Should have been able to DELETE bundle");
+        let bundles = mgr.list(harness.zpool_id, dataset_id).await.unwrap();
+        assert_eq!(bundles.len(), 0);
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn creation_idempotency() {
+        let logctx = test_setup_log("creation_idempotency");
+        let log = &logctx.log;
+
+        // Set up storage (zpool, but not dataset!)
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Get a support bundle that we're ready to store.
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Configure the dataset now, so it'll exist for future requests.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Create the bundle
+        mgr.create(
+            harness.zpool_id,
+            dataset_id,
+            support_bundle_id,
+            hash,
+            stream::once(async {
+                Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+            }),
+        )
+        .await
+        .expect("Should have created support bundle");
+
+        // Creating the dataset again should work.
+        mgr.create(
+            harness.zpool_id,
+            dataset_id,
+            support_bundle_id,
+            hash,
+            stream::once(async {
+                Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+            }),
+        )
+        .await
+        .expect("Support bundle should already exist");
+
+        // This is an edge-case, but just to make sure the behavior
+        // is codified: If we are creating a bundle that already exists,
+        // we'll skip reading the body.
+        mgr.create(
+            harness.zpool_id,
+            dataset_id,
+            support_bundle_id,
+            hash,
+            stream::once(async {
+                // NOTE: This is different from the call above.
+                Ok(Bytes::from_static(b"Ignored"))
+            }),
+        )
+        .await
+        .expect("Support bundle should already exist");
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn ranges() {
+        let logctx = test_setup_log("ranges");
+        let log = &logctx.log;
+
+        // Set up storage
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // For this test, we'll add a dataset that can contain our bundles.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+
+        // Create a fake support bundle -- really, just a zipfile.
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Create a new bundle
+        let bundle = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect("Should have created support bundle");
+        assert_eq!(bundle.support_bundle_id, support_bundle_id);
+        assert_eq!(bundle.state, SupportBundleState::Complete);
+
+        // GET the bundle we created, and observe the contents of the bundle
+        let ranges = [
+            (0, 5),
+            (5, 100),
+            (0, 100),
+            (1000, 1000),
+            (1000, 1001),
+            (1000, zipfile_data.len() - 1),
+        ];
+
+        for (first, last) in ranges.into_iter() {
+            eprintln!("Trying whole-file range: {first}-{last}");
+            let range =
+                PotentialRange::new(format!("bytes={first}-{last}").as_bytes());
+            let expected_data = &zipfile_data[first..=last];
+
+            let mut response = mgr
+                .get(
+                    harness.zpool_id,
+                    dataset_id,
+                    support_bundle_id,
+                    Some(range),
+                    SupportBundleQueryType::Whole,
+                )
+                .await
+                .expect("Should have been able to GET bundle");
+            assert_eq!(read_body(&mut response).await, expected_data);
+            assert_eq!(response.headers().len(), 4);
+            assert_eq!(
+                response.headers()[CONTENT_RANGE],
+                format!("bytes {first}-{last}/{}", zipfile_data.len())
+            );
+            assert_eq!(
+                response.headers()[CONTENT_LENGTH],
+                ((last + 1) - first).to_string()
+            );
+            assert_eq!(response.headers()[CONTENT_TYPE], "application/zip");
+            assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+        }
+
+        // GET the index of the bundle.
+        let expected_index_str = example_files()
+            .into_iter()
+            .map(|(name, _)| name)
+            .collect::<Vec<_>>()
+            .join("\n");
+        let expected_index = expected_index_str.as_bytes();
+        let ranges = [(0, 5), (5, 10), (10, expected_index.len() - 1)];
+
+        for (first, last) in ranges.into_iter() {
+            eprintln!("Trying index range: {first}-{last}");
+            let range =
+                PotentialRange::new(format!("bytes={first}-{last}").as_bytes());
+            let expected_data = &expected_index[first..=last];
+            let mut response = mgr
+                .get(
+                    harness.zpool_id,
+                    dataset_id,
+                    support_bundle_id,
+                    Some(range),
+                    SupportBundleQueryType::Index,
+                )
+                .await
+                .expect("Should have been able to GET bundle index");
+            assert_eq!(read_body(&mut response).await, expected_data);
+            assert_eq!(response.headers().len(), 4);
+            assert_eq!(
+                response.headers()[CONTENT_RANGE],
+                format!("bytes {first}-{last}/{}", expected_index.len())
+            );
+            assert_eq!(
+                response.headers()[CONTENT_LENGTH],
+                ((last + 1) - first).to_string(),
+            );
+            assert_eq!(response.headers()[CONTENT_TYPE], "text/plain");
+            assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+        }
+
+        // GET a single file within the bundle
+        let ranges = [(0, 5), (5, 10), (5, GREET_DATA.len() - 1)];
+        for (first, last) in ranges.into_iter() {
+            eprintln!("Trying single file range: {first}-{last}");
+            let range =
+                PotentialRange::new(format!("bytes={first}-{last}").as_bytes());
+            let expected_data = &GREET_DATA[first..=last];
+            let mut response = mgr
+                .get(
+                    harness.zpool_id,
+                    dataset_id,
+                    support_bundle_id,
+                    Some(range),
+                    SupportBundleQueryType::Path {
+                        file_path: GREET_PATH.to_string(),
+                    },
+                )
+                .await
+                .expect("Should have been able to GET single file");
+            assert_eq!(read_body(&mut response).await, expected_data);
+            assert_eq!(response.headers().len(), 4);
+            assert_eq!(
+                response.headers()[CONTENT_RANGE],
+                format!("bytes {first}-{last}/{}", GREET_DATA.len())
+            );
+            assert_eq!(
+                response.headers()[CONTENT_LENGTH],
+                ((last + 1) - first).to_string(),
+            );
+            assert_eq!(
+                response.headers()[CONTENT_TYPE],
+                "application/octet-stream"
+            );
+            assert_eq!(response.headers()[ACCEPT_RANGES], "bytes");
+        }
+
+        // Cannot GET nor HEAD a directory, even with range requests
+        let range = PotentialRange::new(b"bytes=0-1");
+        let err = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                Some(range),
+                SupportBundleQueryType::Path {
+                    file_path: ARBITRARY_DIRECTORY.to_string(),
+                },
+            )
+            .await
+            .expect_err("Should not be able to GET directory");
+        assert!(matches!(err, Error::NotAFile), "Unexpected error: {err:?}");
+
+        let range = PotentialRange::new(b"bytes=0-1");
+        let err = mgr
+            .head(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                Some(range),
+                SupportBundleQueryType::Path {
+                    file_path: ARBITRARY_DIRECTORY.to_string(),
+                },
+            )
+            .await
+            .expect_err("Should not be able to HEAD directory");
+        assert!(matches!(err, Error::NotAFile), "Unexpected error: {err:?}");
+
+        // DELETE the bundle on the dataset
+        mgr.delete(harness.zpool_id, dataset_id, support_bundle_id)
+            .await
+            .expect("Should have been able to DELETE bundle");
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+}
diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs
index c83717ef2e..42c50379ce 100644
--- a/uuid-kinds/src/lib.rs
+++ b/uuid-kinds/src/lib.rs
@@ -66,6 +66,7 @@ impl_typed_uuid_kind! {
     ReconfiguratorSim => "reconfigurator_sim",
     Region => "region",
     Sled => "sled",
+    SupportBundle => "support_bundle",
     TufRepo => "tuf_repo",
     Upstairs => "upstairs",
     UpstairsRepair => "upstairs_repair",
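
Editor's note (illustration only, not part of the patch): the `expected_hash` that `SupportBundleManager::create` takes is the SHA-256 digest of the entire zipfile body; `create` recomputes that digest while streaming the body to a temporary file and refuses to rename the file into place on a mismatch. A minimal sketch of how a caller might compute it, assuming `ArtifactHash` and `sha2::{Digest, Sha256}` are in scope as in the tests above (the helper name `bundle_hash` is hypothetical):

    use sha2::{Digest, Sha256};

    // Compute the digest that `create` will verify against the streamed body.
    fn bundle_hash(zipfile_data: &[u8]) -> ArtifactHash {
        ArtifactHash(
            Sha256::digest(zipfile_data)
                .as_slice()
                .try_into()
                .expect("SHA-256 digests are always 32 bytes"),
        )
    }

This mirrors what the tests do inline with `Sha256::digest(zipfile_data.as_slice())`.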