From 230a22c7da74316510eeca024f81171a669f9709 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 10 Jun 2024 09:27:09 -0400 Subject: [PATCH] =?UTF-8?q?Revert=20"Allow=20different=20versions=20for=20?= =?UTF-8?q?differently=20signed=20RoT=20images=20(#=E2=80=A6=20(#5868)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …5580)" This reverts commit 94ef8e50d0c1310bfc41899395cbe4a562ed88b2. It turns out there are several places that are still making assumptions about versions. Until we've identified them all just revert this change to avoid any unexpected surprises. --- Cargo.lock | 13 +- tufaceous-lib/src/assemble/manifest.rs | 3 - update-common/src/artifacts/update_plan.rs | 440 ++------------------- update-common/src/errors.rs | 12 +- 4 files changed, 39 insertions(+), 429 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b43c6fe327..01e6f04557 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3225,10 +3225,9 @@ dependencies = [ [[package]] name = "hubtools" -version = "0.4.6" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#943c4bbe6b50d1ab635d085d6204895fb4154e79" +version = "0.4.1" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#73cd5a84689d59ecce9da66ad4389c540d315168" dependencies = [ - "hex", "lpc55_areas", "lpc55_sign", "object 0.30.4", @@ -4132,8 +4131,8 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lpc55_areas" -version = "0.2.5" -source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" +version = "0.2.4" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "bitfield", "clap", @@ -4143,8 +4142,8 @@ dependencies = [ [[package]] name = "lpc55_sign" -version = "0.3.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" +version = "0.3.3" +source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" dependencies = [ "byteorder", "const-oid", diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 2236580b75..1c4a676f4c 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -294,14 +294,11 @@ impl<'a> FakeDataAttributes<'a> { KnownArtifactKind::SwitchRot => "fake-sidecar-rot", }; - // For our purposes sign = board represents what we want for the RoT - // and we don't care about the SP at this point let caboose = CabooseBuilder::default() .git_commit("this-is-fake-data") .board(board) .version(self.version.to_string()) .name(self.name) - .sign(board) .build(); let mut builder = HubrisArchiveBuilder::with_fake_image(); diff --git a/update-common/src/artifacts/update_plan.rs b/update-common/src/artifacts/update_plan.rs index 53286ee09a..c5b171d648 100644 --- a/update-common/src/artifacts/update_plan.rs +++ b/update-common/src/artifacts/update_plan.rs @@ -33,7 +33,6 @@ use std::collections::btree_map; use std::collections::BTreeMap; use std::collections::HashMap; use std::io; -use tokio::io::AsyncReadExt; use tufaceous_lib::HostPhaseImages; use tufaceous_lib::RotArchives; @@ -74,15 +73,6 @@ pub struct UpdatePlan { pub control_plane_hash: ArtifactHash, } -// Used to represent the information extracted from signed RoT images. 
This -// is used when going from `UpdatePlanBuilder` -> `UpdatePlan` to check -// the versions on the RoT images -#[derive(Debug, Eq, Hash, PartialEq)] -struct RotSignData { - kind: KnownArtifactKind, - sign: Vec, -} - /// `UpdatePlanBuilder` mirrors all the fields of `UpdatePlan`, but they're all /// optional: it can be filled in as we read a TUF repository. /// [`UpdatePlanBuilder::build()`] will (fallibly) convert from the builder to @@ -124,9 +114,6 @@ pub struct UpdatePlanBuilder<'a> { by_hash: HashMap, artifacts_meta: Vec, - // map for RoT signing information, used in `ArtifactsWithPlan` - rot_by_sign: HashMap>, - // extra fields we use to build the plan extracted_artifacts: ExtractedArtifacts, log: &'a Logger, @@ -157,7 +144,6 @@ impl<'a> UpdatePlanBuilder<'a> { by_id: BTreeMap::new(), by_hash: HashMap::new(), - rot_by_sign: HashMap::new(), artifacts_meta: Vec::new(), extracted_artifacts, @@ -331,56 +317,6 @@ impl<'a> UpdatePlanBuilder<'a> { }, )?; - // We need to get all the signing information now to properly check - // version at builder time (builder time is not async) - let image_a_stream = rot_a_data - .reader_stream() - .await - .map_err(RepositoryError::CreateReaderStream)?; - let mut image_a = Vec::with_capacity(rot_a_data.file_size()); - tokio_util::io::StreamReader::new(image_a_stream) - .read_to_end(&mut image_a) - .await - .map_err(|error| RepositoryError::ReadExtractedArchive { - artifact: ArtifactHashId { - kind: artifact_id.kind.clone(), - hash: rot_a_data.hash(), - }, - error, - })?; - - let (artifact_id, image_a_sign) = - read_hubris_sign_from_archive(artifact_id, image_a)?; - - self.rot_by_sign - .entry(RotSignData { kind: artifact_kind, sign: image_a_sign }) - .or_default() - .push(artifact_id.clone()); - - let image_b_stream = rot_b_data - .reader_stream() - .await - .map_err(RepositoryError::CreateReaderStream)?; - let mut image_b = Vec::with_capacity(rot_b_data.file_size()); - tokio_util::io::StreamReader::new(image_b_stream) - .read_to_end(&mut image_b) - .await - .map_err(|error| RepositoryError::ReadExtractedArchive { - artifact: ArtifactHashId { - kind: artifact_id.kind.clone(), - hash: rot_b_data.hash(), - }, - error, - })?; - - let (artifact_id, image_b_sign) = - read_hubris_sign_from_archive(artifact_id, image_b)?; - - self.rot_by_sign - .entry(RotSignData { kind: artifact_kind, sign: image_b_sign }) - .or_default() - .push(artifact_id.clone()); - // Technically we've done all we _need_ to do with the RoT images. We // send them directly to MGS ourself, so don't expect anyone to ask for // them via `by_id` or `by_hash`. However, it's more convenient to @@ -764,26 +700,38 @@ impl<'a> UpdatePlanBuilder<'a> { } } - // Ensure that all A/B RoT images for each board kind and same - // signing key have the same version. (i.e. allow gimlet_rot signed - // with a staging key to be a different version from gimlet_rot signed - // with a production key) - for (entry, versions) in self.rot_by_sign { - let kind = entry.kind; - // This unwrap is safe because we check above that each of the types - // has at least one entry - let version = &versions.first().unwrap().version; - match versions.iter().find(|x| x.version != *version) { - None => continue, - Some(v) => { + // Ensure that all A/B RoT images for each board kind have the same + // version number. 
+ for (kind, mut single_board_rot_artifacts) in [ + ( + KnownArtifactKind::GimletRot, + self.gimlet_rot_a.iter().chain(&self.gimlet_rot_b), + ), + ( + KnownArtifactKind::PscRot, + self.psc_rot_a.iter().chain(&self.psc_rot_b), + ), + ( + KnownArtifactKind::SwitchRot, + self.sidecar_rot_a.iter().chain(&self.sidecar_rot_b), + ), + ] { + // We know each of these iterators has at least 2 elements (one from + // the A artifacts and one from the B artifacts, checked above) so + // we can safely unwrap the first. + let version = + &single_board_rot_artifacts.next().unwrap().id.version; + for artifact in single_board_rot_artifacts { + if artifact.id.version != *version { return Err(RepositoryError::MultipleVersionsPresent { kind, v1: version.clone(), - v2: v.version.clone(), - }) + v2: artifact.id.version.clone(), + }); } } } + // Repeat the same version check for all SP images. (This is a separate // loop because the types of the iterators don't match.) for (kind, mut single_board_sp_artifacts) in [ @@ -855,32 +803,6 @@ pub struct UpdatePlanBuildOutput { pub artifacts_meta: Vec, } -// We take id solely to be able to output error messages -fn read_hubris_sign_from_archive( - id: ArtifactId, - data: Vec, -) -> Result<(ArtifactId, Vec), RepositoryError> { - let archive = match RawHubrisArchive::from_vec(data).map_err(Box::new) { - Ok(archive) => archive, - Err(error) => { - return Err(RepositoryError::ParsingHubrisArchive { id, error }); - } - }; - let caboose = match archive.read_caboose().map_err(Box::new) { - Ok(caboose) => caboose, - Err(error) => { - return Err(RepositoryError::ReadHubrisCaboose { id, error }); - } - }; - let sign = match caboose.sign() { - Ok(sign) => sign, - Err(error) => { - return Err(RepositoryError::ReadHubrisCabooseBoard { id, error }); - } - }; - Ok((id, sign.to_vec())) -} - // This function takes and returns `id` to avoid an unnecessary clone; `id` will // be present in either the Ok tuple or the error. 
fn read_hubris_board_from_archive( @@ -973,11 +895,11 @@ mod tests { tarball: Bytes, } - fn make_random_rot_image(sign: &str, board: &str) -> RandomRotImage { + fn make_random_rot_image() -> RandomRotImage { use tufaceous_lib::CompositeRotArchiveBuilder; - let archive_a = make_fake_rot_image(sign, board); - let archive_b = make_fake_rot_image(sign, board); + let archive_a = make_random_bytes(); + let archive_b = make_random_bytes(); let mut builder = CompositeRotArchiveBuilder::new(Vec::new(), MtimeSource::Zero) @@ -1004,22 +926,6 @@ mod tests { } } - fn make_fake_rot_image(sign: &str, board: &str) -> Vec { - use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; - - let caboose = CabooseBuilder::default() - .git_commit("this-is-fake-data") - .board(board) - .version("0.0.0") - .name("rot-bord") - .sign(sign) - .build(); - - let mut builder = HubrisArchiveBuilder::with_fake_image(); - builder.write_caboose(caboose.as_slice()).unwrap(); - builder.build_to_vec().unwrap() - } - fn make_fake_sp_image(board: &str) -> Vec { use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; @@ -1035,288 +941,6 @@ mod tests { builder.build_to_vec().unwrap() } - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn test_bad_rot_versions() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); - const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); - - let logctx = test_setup_log("test_multi_rot_version"); - - let mut plan_builder = - UpdatePlanBuilder::new(VERSION_0, &logctx.log).unwrap(); - - // The control plane artifact can be arbitrary bytes; just populate it - // with random data. - { - let kind = KnownArtifactKind::ControlPlane; - let data = make_random_bytes(); - let hash = ArtifactHash(Sha256::digest(&data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(Bytes::from(data))]), - ) - .await - .unwrap(); - } - - // For each SP image, we'll insert two artifacts: these should end up in - // the update plan's SP image maps keyed by their "board". Normally the - // board is read from the archive itself via hubtools; we'll inject a - // test function that returns the artifact ID name as the board instead. - for (kind, boards) in [ - (KnownArtifactKind::GimletSp, ["test-gimlet-a", "test-gimlet-b"]), - (KnownArtifactKind::PscSp, ["test-psc-a", "test-psc-b"]), - (KnownArtifactKind::SwitchSp, ["test-switch-a", "test-switch-b"]), - ] { - for board in boards { - let data = make_fake_sp_image(board); - let hash = ArtifactHash(Sha256::digest(&data).into()); - let id = ArtifactId { - name: board.to_string(), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(Bytes::from(data))]), - ) - .await - .unwrap(); - } - } - - // The Host, Trampoline, and RoT artifacts must be structed the way we - // expect (i.e., .tar.gz's containing multiple inner artifacts). 
- let host = make_random_host_os_image(); - let trampoline = make_random_host_os_image(); - - for (kind, image) in [ - (KnownArtifactKind::Host, &host), - (KnownArtifactKind::Trampoline, &trampoline), - ] { - let data = &image.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(data.clone())]), - ) - .await - .unwrap(); - } - - let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); - let psc_rot = make_random_rot_image("psc", "psc"); - let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); - - let gimlet_rot_2 = make_random_rot_image("gimlet", "gimlet-the second"); - - for (kind, artifact) in [ - (KnownArtifactKind::GimletRot, &gimlet_rot), - (KnownArtifactKind::PscRot, &psc_rot), - (KnownArtifactKind::SwitchRot, &sidecar_rot), - ] { - let data = &artifact.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(data.clone())]), - ) - .await - .unwrap(); - } - - let bad_kind = KnownArtifactKind::GimletRot; - let data = &gimlet_rot_2.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{bad_kind:?}"), - version: VERSION_1, - kind: bad_kind.into(), - }; - plan_builder - .add_artifact(id, hash, futures::stream::iter([Ok(data.clone())])) - .await - .unwrap(); - - match plan_builder.build() { - Err(_) => (), - Ok(_) => panic!("Added two artifacts with the same version"), - } - logctx.cleanup_successful(); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn test_multi_rot_version() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); - const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); - - let logctx = test_setup_log("test_multi_rot_version"); - - let mut plan_builder = - UpdatePlanBuilder::new("0.0.0".parse().unwrap(), &logctx.log) - .unwrap(); - - // The control plane artifact can be arbitrary bytes; just populate it - // with random data. - { - let kind = KnownArtifactKind::ControlPlane; - let data = make_random_bytes(); - let hash = ArtifactHash(Sha256::digest(&data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(Bytes::from(data))]), - ) - .await - .unwrap(); - } - - // For each SP image, we'll insert two artifacts: these should end up in - // the update plan's SP image maps keyed by their "board". Normally the - // board is read from the archive itself via hubtools; we'll inject a - // test function that returns the artifact ID name as the board instead. 
- for (kind, boards) in [ - (KnownArtifactKind::GimletSp, ["test-gimlet-a", "test-gimlet-b"]), - (KnownArtifactKind::PscSp, ["test-psc-a", "test-psc-b"]), - (KnownArtifactKind::SwitchSp, ["test-switch-a", "test-switch-b"]), - ] { - for board in boards { - let data = make_fake_sp_image(board); - let hash = ArtifactHash(Sha256::digest(&data).into()); - let id = ArtifactId { - name: board.to_string(), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(Bytes::from(data))]), - ) - .await - .unwrap(); - } - } - - // The Host, Trampoline, and RoT artifacts must be structed the way we - // expect (i.e., .tar.gz's containing multiple inner artifacts). - let host = make_random_host_os_image(); - let trampoline = make_random_host_os_image(); - - for (kind, image) in [ - (KnownArtifactKind::Host, &host), - (KnownArtifactKind::Trampoline, &trampoline), - ] { - let data = &image.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(data.clone())]), - ) - .await - .unwrap(); - } - - let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); - let psc_rot = make_random_rot_image("psc", "psc"); - let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); - - let gimlet_rot_2 = make_random_rot_image("gimlet2", "gimlet"); - let psc_rot_2 = make_random_rot_image("psc2", "psc"); - let sidecar_rot_2 = make_random_rot_image("sidecar2", "sidecar"); - - for (kind, artifact) in [ - (KnownArtifactKind::GimletRot, &gimlet_rot), - (KnownArtifactKind::PscRot, &psc_rot), - (KnownArtifactKind::SwitchRot, &sidecar_rot), - ] { - let data = &artifact.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_0, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(data.clone())]), - ) - .await - .unwrap(); - } - - for (kind, artifact) in [ - (KnownArtifactKind::GimletRot, &gimlet_rot_2), - (KnownArtifactKind::PscRot, &psc_rot_2), - (KnownArtifactKind::SwitchRot, &sidecar_rot_2), - ] { - let data = &artifact.tarball; - let hash = ArtifactHash(Sha256::digest(data).into()); - let id = ArtifactId { - name: format!("{kind:?}"), - version: VERSION_1, - kind: kind.into(), - }; - plan_builder - .add_artifact( - id, - hash, - futures::stream::iter([Ok(data.clone())]), - ) - .await - .unwrap(); - } - - let UpdatePlanBuildOutput { plan, .. } = plan_builder.build().unwrap(); - - assert_eq!(plan.gimlet_rot_a.len(), 2); - assert_eq!(plan.gimlet_rot_b.len(), 2); - assert_eq!(plan.psc_rot_a.len(), 2); - assert_eq!(plan.psc_rot_b.len(), 2); - assert_eq!(plan.sidecar_rot_a.len(), 2); - assert_eq!(plan.sidecar_rot_b.len(), 2); - logctx.cleanup_successful(); - } - // See documentation for extract_nested_artifact_pair for why multi_thread // is required. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -1427,9 +1051,9 @@ mod tests { .unwrap(); } - let gimlet_rot = make_random_rot_image("gimlet", "gimlet"); - let psc_rot = make_random_rot_image("psc", "psc"); - let sidecar_rot = make_random_rot_image("sidecar", "sidecar"); + let gimlet_rot = make_random_rot_image(); + let psc_rot = make_random_rot_image(); + let sidecar_rot = make_random_rot_image(); for (kind, artifact) in [ (KnownArtifactKind::GimletRot, &gimlet_rot), diff --git a/update-common/src/errors.rs b/update-common/src/errors.rs index 3d57fb6ab5..0d65312c56 100644 --- a/update-common/src/errors.rs +++ b/update-common/src/errors.rs @@ -140,14 +140,6 @@ pub enum RepositoryError { "duplicate hash entries found in artifacts.json for kind `{}`, hash `{}`", .0.kind, .0.hash )] DuplicateHashEntry(ArtifactHashId), - #[error("error creating reader stream")] - CreateReaderStream(#[source] anyhow::Error), - #[error("error reading extracted archive kind {}, hash {}", .artifact.kind, .artifact.hash)] - ReadExtractedArchive { - artifact: ArtifactHashId, - #[source] - error: std::io::Error, - }, } impl RepositoryError { @@ -161,9 +153,7 @@ impl RepositoryError { | RepositoryError::TempFileCreate(_) | RepositoryError::TempFileWrite(_) | RepositoryError::TempFileFlush(_) - | RepositoryError::NamedTempFileCreate { .. } - | RepositoryError::ReadExtractedArchive { .. } - | RepositoryError::CreateReaderStream { .. } => { + | RepositoryError::NamedTempFileCreate { .. } => { HttpError::for_unavail(None, message) }