From 06bf42cfd68a5151e643021ea4f81560df16c648 Mon Sep 17 00:00:00 2001
From: Robin Krahl
Date: Thu, 7 Mar 2024 21:41:36 +0100
Subject: [PATCH] Extract extensions into crates

Previously, this repository contained one crate with all extension
definitions and the backend implementation. This is problematic when
semantic versioning is used, as a breaking change in the backend or in
a single extension would also affect all users of any other extension.

This patch moves the extensions into separate crates that can be
versioned independently so that clients only have to depend on the
extension crates they really need.

Fixes: https://github.com/trussed-dev/trussed-staging/issues/3
---
 Cargo.toml                                  |  45 +-
 Makefile                                    |  14 +-
 extensions/chunked/Cargo.toml               |  18 +
 .../mod.rs => extensions/chunked/src/lib.rs | 410 +-----------------
 .../chunked/src}/utils.rs                   |   6 +-
 extensions/manage/Cargo.toml                |  14 +
 extensions/manage/src/lib.rs                | 147 +++++++
 extensions/wrap-key-to-file/Cargo.toml      |  14 +
 .../wrap-key-to-file/src/lib.rs             | 135 +-----
 src/chunked/mod.rs                          | 408 +++++++++++++++++
 src/{streaming => chunked}/store.rs         |   0
 src/lib.rs                                  |  14 +-
 src/manage.rs                               | 147 +------
 src/virt.rs                                 |  17 +-
 src/wrap_key_to_file.rs                     | 134 ++++++
 tests/chunked.rs                            |   7 +-
 tests/encrypted-chunked.rs                  |  10 +-
 tests/manage.rs                             |   3 +-
 tests/wrap_key_to_file.rs                   |   2 +-
 19 files changed, 828 insertions(+), 717 deletions(-)
 create mode 100644 extensions/chunked/Cargo.toml
 rename src/streaming/mod.rs => extensions/chunked/src/lib.rs (55%)
 rename {src/streaming => extensions/chunked/src}/utils.rs (94%)
 create mode 100644 extensions/manage/Cargo.toml
 create mode 100644 extensions/manage/src/lib.rs
 create mode 100644 extensions/wrap-key-to-file/Cargo.toml
 rename src/wrap_key_to_file/mod.rs => extensions/wrap-key-to-file/src/lib.rs (53%)
 create mode 100644 src/chunked/mod.rs
 rename src/{streaming => chunked}/store.rs (100%)
 create mode 100644 src/wrap_key_to_file.rs

diff --git a/Cargo.toml b/Cargo.toml
index e309e0b..c5e92f2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,34 +1,53 @@
 # Copyright (C) Nitrokey GmbH
 # SPDX-License-Identifier: CC0-1.0
 
-[package]
-name = "trussed-staging"
-version = "0.1.0"
+[workspace]
+members = ["extensions/chunked", "extensions/manage", "extensions/wrap-key-to-file"]
+
+[workspace.package]
 authors = ["Nitrokey GmbH "]
 edition = "2021"
 repository = "https://github.com/trussed-dev/trussed-staging"
 license = "Apache-2.0 OR MIT"
+
+[workspace.dependencies]
+serde = { version = "1.0.160", default-features = false, features = ["derive"] }
+serde-byte-array = "0.1.2"
+trussed = { version = "0.1.0", features = ["serde-extensions"] }
+
+[package]
+name = "trussed-staging"
+version = "0.1.0"
 description = "Work in progress trussed features"
+authors.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
 
 [dependencies]
-trussed = { version = "0.1.0", features = ["serde-extensions"] }
+serde.workspace = true
+serde-byte-array.workspace = true
+trussed.workspace = true
+
 chacha20poly1305 = { version = "0.10", default-features = false, features = ["heapless", "reduced-round"], optional = true }
-serde = { version = "1.0.160", default-features = false, features = ["derive"] }
 rand_core = { version = "0.6.4", default-features = false }
 delog = "0.1.6"
 littlefs2 = "0.4.0"
-serde-byte-array = "0.1.2"
+
+trussed-chunked = { version = "0.1.0", optional = true }
+trussed-manage = { version = "0.1.0", optional = true }
+trussed-wrap-key-to-file = { version = "0.1.0", optional = true }
 
 [dev-dependencies]
-trussed = { version = "0.1.0", default-features = false, features = ["serde-extensions", "virt"] }
+trussed = { workspace = true, features = ["virt"] }
 
 [features]
 default = ["manage"]
-wrap-key-to-file = ["chacha20poly1305"]
-chunked = []
-encrypted-chunked = ["chunked", "chacha20poly1305/stream"]
-manage = []
+wrap-key-to-file = ["chacha20poly1305", "trussed-wrap-key-to-file"]
+chunked = ["trussed-chunked"]
+encrypted-chunked = ["chunked", "chacha20poly1305/stream", "trussed-chunked/encrypted-chunked"]
+manage = ["trussed-manage"]
 virt = ["std", "trussed/virt"]
 std = []
 
@@ -44,3 +63,7 @@ log-error = []
 [patch.crates-io]
 trussed = { git = "https://github.com/trussed-dev/trussed.git", rev = "45ed62ba97d994aa6e05e2b61cea013ef131caa4" }
 littlefs2 = { git = "https://github.com/trussed-dev/littlefs2.git", rev = "ebd27e49ca321089d01d8c9b169c4aeb58ceeeca" }
+
+trussed-chunked = { path = "extensions/chunked" }
+trussed-manage = { path = "extensions/manage" }
+trussed-wrap-key-to-file = { path = "extensions/wrap-key-to-file" }
diff --git a/Makefile b/Makefile
index 834a01b..c181a02 100644
--- a/Makefile
+++ b/Makefile
@@ -3,18 +3,22 @@
 
 .PHONY: check
 check:
-	RUSTLFAGS='-Dwarnings' cargo check --all-features --all-targets
+	RUSTFLAGS='-Dwarnings' cargo check --all-features --all-targets --workspace
+	RUSTFLAGS='-Dwarnings' cargo check --no-default-features
+	RUSTFLAGS='-Dwarnings' cargo check --features encrypted-chunked
+	RUSTFLAGS='-Dwarnings' cargo check --features manage
+	RUSTFLAGS='-Dwarnings' cargo check --features wrap-key-to-file
 
 .PHONY: lint
 lint:
-	cargo clippy --all-features --all-targets -- --deny warnings
-	cargo fmt -- --check
-	RUSTDOCFLAGS='-Dwarnings' cargo doc --no-deps --all-features
+	cargo clippy --all-features --all-targets --workspace -- --deny warnings
+	cargo fmt --all -- --check
+	RUSTDOCFLAGS='-Dwarnings' cargo doc --no-deps --all-features --workspace
 	reuse lint
 
 .PHONY: test
test:
-	cargo test --all-features
+	cargo test --all-features --workspace
 
 .PHONY: ci
 ci: check lint test
diff --git a/extensions/chunked/Cargo.toml b/extensions/chunked/Cargo.toml
new file mode 100644
index 0000000..daf82a5
--- /dev/null
+++ b/extensions/chunked/Cargo.toml
@@ -0,0 +1,18 @@
+# Copyright (C) Nitrokey GmbH
+# SPDX-License-Identifier: CC0-1.0
+
+[package]
+name = "trussed-chunked"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+
+[dependencies]
+serde.workspace = true
+serde-byte-array.workspace = true
+trussed.workspace = true
+
+[features]
+encrypted-chunked = []
diff --git a/src/streaming/mod.rs b/extensions/chunked/src/lib.rs
similarity index 55%
rename from src/streaming/mod.rs
rename to extensions/chunked/src/lib.rs
index 324be8f..8088d47 100644
--- a/src/streaming/mod.rs
+++ b/extensions/chunked/src/lib.rs
@@ -1,70 +1,22 @@
 // Copyright (C) Nitrokey GmbH
 // SPDX-License-Identifier: Apache-2.0 or MIT
 
-mod store;
-use store::OpenSeekFrom;
+#![no_std]
+#![warn(non_ascii_idents, trivial_casts, unused, unused_qualifications)]
+#![deny(unsafe_code)]
 
 #[cfg(feature = "encrypted-chunked")]
 pub mod utils;
 
-#[cfg(feature = "encrypted-chunked")]
-use chacha20poly1305::{
-    aead::stream::{DecryptorLE31, EncryptorLE31, Nonce as StreamNonce, StreamLE31},
-    ChaCha8Poly1305, KeyInit,
-};
-use rand_core::RngCore;
 use serde::{Deserialize, Serialize};
 use serde_byte_array::ByteArray;
 use trussed::{
     client::FilesystemClient,
-    config::MAX_MESSAGE_LENGTH,
-    key::{Kind, Secrecy},
-    serde_extensions::{Extension,
ExtensionClient, ExtensionImpl, ExtensionResult}, - service::{Filestore, Keystore, ServiceResources}, - store::Store, - types::{CoreContext, KeyId, Location, Message, Path, PathBuf, UserAttribute}, - Bytes, Error, + serde_extensions::{Extension, ExtensionClient, ExtensionResult}, + types::{KeyId, Location, Message, PathBuf, UserAttribute}, }; -use crate::StagingContext; - -#[derive(Debug)] -pub struct ChunkedReadState { - pub path: PathBuf, - pub location: Location, - pub offset: usize, -} - -#[derive(Debug)] -pub struct ChunkedWriteState { - pub path: PathBuf, - pub location: Location, -} - -#[cfg(feature = "encrypted-chunked")] -pub struct EncryptedChunkedReadState { - pub path: PathBuf, - pub location: Location, - pub offset: usize, - pub decryptor: DecryptorLE31, -} - -#[cfg(feature = "encrypted-chunked")] -pub struct EncryptedChunkedWriteState { - pub path: PathBuf, - pub location: Location, - pub encryptor: EncryptorLE31, -} - -#[non_exhaustive] -pub enum ChunkedIoState { - Read(ChunkedReadState), - Write(ChunkedWriteState), - #[cfg(feature = "encrypted-chunked")] - EncryptedRead(EncryptedChunkedReadState), - #[cfg(feature = "encrypted-chunked")] - EncryptedWrite(EncryptedChunkedWriteState), -} +pub const CHACHA8_STREAM_NONCE_LEN: usize = 8; #[derive(Debug, Default)] pub struct ChunkedExtension; @@ -106,7 +58,7 @@ pub enum ChunkedReply { AppendFile(reply::AppendFile), } -mod request { +pub mod request { use super::*; use serde::{Deserialize, Serialize}; use serde_byte_array::ByteArray; @@ -319,7 +271,7 @@ mod request { } } -mod reply { +pub mod reply { use super::*; use serde::{Deserialize, Serialize}; use trussed::types::Message; @@ -516,351 +468,7 @@ mod reply { } } -impl ExtensionImpl for super::StagingBackend { - fn extension_request( - &mut self, - core_ctx: &mut CoreContext, - backend_ctx: &mut Self::Context, - request: &ChunkedRequest, - resources: &mut ServiceResources
<P>
, - ) -> Result { - let rng = &mut resources.rng()?; - let keystore = &mut resources.keystore(core_ctx.path.clone())?; - let filestore = &mut resources.filestore(core_ctx.path.clone()); - let client_id = &core_ctx.path; - let store = resources.platform_mut().store(); - match request { - ChunkedRequest::ReadChunk(_) => { - let read_state = match &mut backend_ctx.chunked_io_state { - Some(ChunkedIoState::Read(read_state)) => read_state, - #[cfg(feature = "encrypted-chunked")] - Some(ChunkedIoState::EncryptedRead(_)) => { - return read_encrypted_chunk(store, client_id, backend_ctx) - } - _ => return Err(Error::MechanismNotAvailable), - }; - let (data, len) = store::filestore_read_chunk( - store, - client_id, - &read_state.path, - read_state.location, - OpenSeekFrom::Start(read_state.offset as u32), - )?; - - read_state.offset += data.len(); - - Ok(reply::ReadChunk { data, len }.into()) - } - ChunkedRequest::StartChunkedRead(request) => { - clear_chunked_state(store, client_id, backend_ctx)?; - let (data, len) = store::filestore_read_chunk( - store, - client_id, - &request.path, - request.location, - OpenSeekFrom::Start(0), - )?; - backend_ctx.chunked_io_state = Some(ChunkedIoState::Read(ChunkedReadState { - path: request.path.clone(), - location: request.location, - offset: data.len(), - })); - Ok(reply::StartChunkedRead { data, len }.into()) - } - ChunkedRequest::WriteChunk(request) => { - let is_last = !request.data.is_full(); - if is_last { - write_last_chunk(store, client_id, backend_ctx, &request.data)?; - } else { - write_chunk(store, client_id, backend_ctx, &request.data)?; - } - Ok(reply::WriteChunk {}.into()) - } - ChunkedRequest::AbortChunkedWrite(_request) => { - let Some(ChunkedIoState::Write(ref write_state)) = backend_ctx.chunked_io_state - else { - return Ok(reply::AbortChunkedWrite { aborted: false }.into()); - }; - let aborted = store::abort_chunked_write( - store, - client_id, - &write_state.path, - write_state.location, - ); - Ok(reply::AbortChunkedWrite { aborted }.into()) - } - ChunkedRequest::StartChunkedWrite(request) => { - backend_ctx.chunked_io_state = Some(ChunkedIoState::Write(ChunkedWriteState { - path: request.path.clone(), - location: request.location, - })); - store::start_chunked_write(store, client_id, &request.path, request.location, &[])?; - Ok(reply::StartChunkedWrite {}.into()) - } - ChunkedRequest::PartialReadFile(request) => { - let (data, file_length) = store::partial_read_file( - store, - client_id, - &request.path, - request.location, - request.offset, - request.length, - )?; - Ok(reply::PartialReadFile { data, file_length }.into()) - } - ChunkedRequest::AppendFile(request) => { - let file_length = store::append_file( - store, - client_id, - &request.path, - request.location, - &request.data, - )?; - Ok(reply::AppendFile { file_length }.into()) - } - #[cfg(feature = "encrypted-chunked")] - ChunkedRequest::StartEncryptedChunkedWrite(request) => { - clear_chunked_state(store, client_id, backend_ctx)?; - let key = keystore.load_key( - Secrecy::Secret, - Some(Kind::Symmetric(CHACHA8_KEY_LEN)), - &request.key, - )?; - let nonce = request.nonce.map(|n| *n).unwrap_or_else(|| { - let mut nonce = [0; CHACHA8_STREAM_NONCE_LEN]; - rng.fill_bytes(&mut nonce); - nonce - }); - let nonce: &StreamNonce> = - (&nonce).into(); - let aead = ChaCha8Poly1305::new((&*key.material).into()); - let encryptor = EncryptorLE31::::from_aead(aead, nonce); - store::start_chunked_write( - store, - client_id, - &request.path, - request.location, - nonce, - )?; - 
backend_ctx.chunked_io_state = - Some(ChunkedIoState::EncryptedWrite(EncryptedChunkedWriteState { - path: request.path.clone(), - location: request.location, - encryptor, - })); - Ok(reply::StartEncryptedChunkedWrite {}.into()) - } - #[cfg(feature = "encrypted-chunked")] - ChunkedRequest::StartEncryptedChunkedRead(request) => { - clear_chunked_state(store, client_id, backend_ctx)?; - let key = keystore.load_key( - Secrecy::Secret, - Some(Kind::Symmetric(CHACHA8_KEY_LEN)), - &request.key, - )?; - let nonce: Bytes = - filestore.read(&request.path, request.location)?; - let nonce: &StreamNonce> = - (&**nonce).into(); - let aead = ChaCha8Poly1305::new((&*key.material).into()); - let decryptor = DecryptorLE31::::from_aead(aead, nonce); - backend_ctx.chunked_io_state = - Some(ChunkedIoState::EncryptedRead(EncryptedChunkedReadState { - path: request.path.clone(), - location: request.location, - decryptor, - offset: CHACHA8_STREAM_NONCE_LEN, - })); - Ok(reply::StartEncryptedChunkedRead {}.into()) - } - } - } -} - -fn clear_chunked_state( - store: impl Store, - client_id: &Path, - ctx: &mut StagingContext, -) -> Result<(), Error> { - match ctx.chunked_io_state.take() { - Some(ChunkedIoState::Read(_)) | None => {} - Some(ChunkedIoState::Write(write_state)) => { - info!("Automatically cancelling write"); - store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); - } - #[cfg(feature = "encrypted-chunked")] - Some(ChunkedIoState::EncryptedRead(_)) => {} - #[cfg(feature = "encrypted-chunked")] - Some(ChunkedIoState::EncryptedWrite(write_state)) => { - info!("Automatically cancelling encrypted write"); - store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); - } - } - Ok(()) -} - -fn write_chunk( - store: impl Store, - client_id: &Path, - ctx: &mut StagingContext, - data: &Message, -) -> Result<(), Error> { - match ctx.chunked_io_state { - Some(ChunkedIoState::Write(ref write_state)) => { - store::filestore_write_chunk( - store, - client_id, - &write_state.path, - write_state.location, - data, - )?; - } - #[cfg(feature = "encrypted-chunked")] - Some(ChunkedIoState::EncryptedWrite(ref mut write_state)) => { - let mut data = - Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); - write_state - .encryptor - .encrypt_next_in_place(write_state.path.as_ref().as_bytes(), &mut *data) - .map_err(|_err| { - error!("Failed to encrypt {:?}", _err); - Error::AeadError - })?; - store::filestore_write_chunk( - store, - client_id, - &write_state.path, - write_state.location, - &data, - )?; - } - _ => return Err(Error::MechanismNotAvailable), - } - Ok(()) -} - -fn write_last_chunk( - store: impl Store, - client_id: &Path, - ctx: &mut StagingContext, - data: &Message, -) -> Result<(), Error> { - match ctx.chunked_io_state.take() { - Some(ChunkedIoState::Write(write_state)) => { - store::filestore_write_chunk( - store, - client_id, - &write_state.path, - write_state.location, - data, - )?; - store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; - } - #[cfg(feature = "encrypted-chunked")] - Some(ChunkedIoState::EncryptedWrite(write_state)) => { - let mut data = - Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); - write_state - .encryptor - .encrypt_last_in_place(&[write_state.location as u8], &mut *data) - .map_err(|_err| { - error!("Failed to encrypt {:?}", _err); - Error::AeadError - })?; - store::filestore_write_chunk( - store, - client_id, - &write_state.path, - 
write_state.location, - &data, - )?; - store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; - } - _ => return Err(Error::MechanismNotAvailable), - } - - Ok(()) -} - -#[cfg(feature = "encrypted-chunked")] -fn read_encrypted_chunk( - store: impl Store, - client_id: &Path, - ctx: &mut StagingContext, -) -> Result { - let Some(ChunkedIoState::EncryptedRead(ref mut read_state)) = ctx.chunked_io_state else { - unreachable!( - "Read encrypted chunk can only be called in the context encrypted chunk reads" - ); - }; - let (mut data, len): (Bytes<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>, usize) = - store::filestore_read_chunk( - store, - client_id, - &read_state.path, - read_state.location, - OpenSeekFrom::Start(read_state.offset as _), - )?; - read_state.offset += data.len(); - - let is_last = !data.is_full(); - if is_last { - let Some(ChunkedIoState::EncryptedRead(read_state)) = ctx.chunked_io_state.take() else { - unreachable!(); - }; - - read_state - .decryptor - .decrypt_last_in_place(&[read_state.location as u8], &mut *data) - .map_err(|_err| { - error!("Failed to decrypt {:?}", _err); - Error::AeadError - })?; - let data = Bytes::from_slice(&data).expect("decryptor removes the tag"); - Ok(reply::ReadChunk { - data, - len: chunked_decrypted_len(len)?, - } - .into()) - } else { - read_state - .decryptor - .decrypt_next_in_place(read_state.path.as_ref().as_bytes(), &mut *data) - .map_err(|_err| { - error!("Failed to decrypt {:?}", _err); - Error::AeadError - })?; - let data = Bytes::from_slice(&data).expect("decryptor removes the tag"); - Ok(reply::ReadChunk { - data, - len: chunked_decrypted_len(len)?, - } - .into()) - } -} - -pub const POLY1305_TAG_LEN: usize = 16; -pub const CHACHA8_KEY_LEN: usize = 32; -pub const CHACHA8_STREAM_NONCE_LEN: usize = 8; -/// Calculate the decrypted length of a chunked encrypted file -fn chunked_decrypted_len(len: usize) -> Result { - let len = len.checked_sub(CHACHA8_STREAM_NONCE_LEN).ok_or_else(|| { - error!("File too small"); - Error::FilesystemReadFailure - })?; - const CHUNK_LEN: usize = POLY1305_TAG_LEN + MAX_MESSAGE_LENGTH; - let chunk_count = len / CHUNK_LEN; - let last_chunk_len = (len % CHUNK_LEN) - .checked_sub(POLY1305_TAG_LEN) - .ok_or_else(|| { - error!("Incorrect last chunk length"); - Error::FilesystemReadFailure - })?; - - Ok(chunk_count * MAX_MESSAGE_LENGTH + last_chunk_len) -} - -type ChunkedResult<'a, R, C> = ExtensionResult<'a, ChunkedExtension, R, C>; +pub type ChunkedResult<'a, R, C> = ExtensionResult<'a, ChunkedExtension, R, C>; pub trait ChunkedClient: ExtensionClient + FilesystemClient { /// Begin writing a file that can be larger than 1KiB diff --git a/src/streaming/utils.rs b/extensions/chunked/src/utils.rs similarity index 94% rename from src/streaming/utils.rs rename to extensions/chunked/src/utils.rs index 7c5a46b..24e129c 100644 --- a/src/streaming/utils.rs +++ b/extensions/chunked/src/utils.rs @@ -1,16 +1,14 @@ // Copyright (C) Nitrokey GmbH // SPDX-License-Identifier: Apache-2.0 or MIT -use littlefs2::path::PathBuf; use serde_byte_array::ByteArray; - use trussed::{ syscall, try_syscall, - types::{KeyId, Location, Message, UserAttribute}, + types::{KeyId, Location, Message, PathBuf, UserAttribute}, Error, }; -use super::{ChunkedClient, CHACHA8_STREAM_NONCE_LEN}; +use crate::{ChunkedClient, CHACHA8_STREAM_NONCE_LEN}; #[derive(Clone, Copy)] pub struct EncryptionData { diff --git a/extensions/manage/Cargo.toml b/extensions/manage/Cargo.toml new file mode 100644 index 0000000..0e29912 --- /dev/null +++ 
b/extensions/manage/Cargo.toml
@@ -0,0 +1,14 @@
+# Copyright (C) Nitrokey GmbH
+# SPDX-License-Identifier: CC0-1.0
+
+[package]
+name = "trussed-manage"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+
+[dependencies]
+serde.workspace = true
+trussed.workspace = true
diff --git a/extensions/manage/src/lib.rs b/extensions/manage/src/lib.rs
new file mode 100644
index 0000000..4081ef2
--- /dev/null
+++ b/extensions/manage/src/lib.rs
@@ -0,0 +1,147 @@
+// Copyright (C) Nitrokey GmbH
+// SPDX-License-Identifier: Apache-2.0 or MIT
+
+#![no_std]
+#![warn(non_ascii_idents, trivial_casts, unused, unused_qualifications)]
+#![deny(unsafe_code)]
+
+use serde::{Deserialize, Serialize};
+use trussed::{
+    serde_extensions::{Extension, ExtensionClient, ExtensionResult},
+    types::{Path, PathBuf},
+    Error,
+};
+
+pub struct ManageExtension;
+
+/// Factory reset the entire device
+///
+/// This will reset all filesystems
+#[derive(Debug, Deserialize, Serialize, Copy, Clone)]
+pub struct FactoryResetDeviceRequest;
+
+/// Factory reset a specific application
+///
+/// This will reset all data for a specific client
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct FactoryResetClientRequest {
+    pub client: PathBuf,
+}
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub enum ManageRequest {
+    FactoryResetDevice(FactoryResetDeviceRequest),
+    FactoryResetClient(FactoryResetClientRequest),
+}
+
+impl From<FactoryResetClientRequest> for ManageRequest {
+    fn from(value: FactoryResetClientRequest) -> Self {
+        Self::FactoryResetClient(value)
+    }
+}
+
+impl TryFrom<ManageRequest> for FactoryResetClientRequest {
+    type Error = Error;
+    fn try_from(value: ManageRequest) -> Result<Self, Self::Error> {
+        match value {
+            ManageRequest::FactoryResetClient(v) => Ok(v),
+            _ => Err(Error::InternalError),
+        }
+    }
+}
+
+impl From<FactoryResetDeviceRequest> for ManageRequest {
+    fn from(value: FactoryResetDeviceRequest) -> Self {
+        Self::FactoryResetDevice(value)
+    }
+}
+
+impl TryFrom<ManageRequest> for FactoryResetDeviceRequest {
+    type Error = Error;
+    fn try_from(value: ManageRequest) -> Result<Self, Self::Error> {
+        match value {
+            ManageRequest::FactoryResetDevice(v) => Ok(v),
+            _ => Err(Error::InternalError),
+        }
+    }
+}
+
+/// Factory reset the entire device
+///
+/// This will reset all filesystems
+#[derive(Debug, Deserialize, Serialize, Copy, Clone)]
+pub struct FactoryResetDeviceReply;
+
+/// Factory reset a specific application
+///
+/// This will reset all data for a specific client
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct FactoryResetClientReply;
+
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub enum ManageReply {
+    FactoryResetDevice(FactoryResetDeviceReply),
+    FactoryResetClient(FactoryResetClientReply),
+}
+
+impl From<FactoryResetClientReply> for ManageReply {
+    fn from(value: FactoryResetClientReply) -> Self {
+        Self::FactoryResetClient(value)
+    }
+}
+
+impl TryFrom<ManageReply> for FactoryResetClientReply {
+    type Error = Error;
+    fn try_from(value: ManageReply) -> Result<Self, Self::Error> {
+        match value {
+            ManageReply::FactoryResetClient(v) => Ok(v),
+            _ => Err(Error::InternalError),
+        }
+    }
+}
+
+impl From<FactoryResetDeviceReply> for ManageReply {
+    fn from(value: FactoryResetDeviceReply) -> Self {
+        Self::FactoryResetDevice(value)
+    }
+}
+
+impl TryFrom<ManageReply> for FactoryResetDeviceReply {
+    type Error = Error;
+    fn try_from(value: ManageReply) -> Result<Self, Self::Error> {
+        match value {
+            ManageReply::FactoryResetDevice(v) => Ok(v),
+            _ => Err(Error::InternalError),
+        }
+    }
+}
+
+impl Extension for ManageExtension {
+    type Request = ManageRequest;
+    type Reply =
ManageReply;
+}
+
+pub type ManageResult<'a, R, C> = ExtensionResult<'a, ManageExtension, R, C>;
+
+pub trait ManageClient: ExtensionClient<ManageExtension> {
+    /// Factory reset the entire device
+    ///
+    /// This will reset all filesystems
+    fn factory_reset_device(&mut self) -> ManageResult<'_, FactoryResetDeviceReply, Self> {
+        self.extension(FactoryResetDeviceRequest)
+    }
+
+    /// Factory reset a specific client
+    ///
+    fn factory_reset_client(
+        &mut self,
+        client: &Path,
+    ) -> ManageResult<'_, FactoryResetClientReply, Self> {
+        self.extension(FactoryResetClientRequest {
+            client: client.into(),
+        })
+    }
+}
+
+impl<C: ExtensionClient<ManageExtension>> ManageClient for C {}
diff --git a/extensions/wrap-key-to-file/Cargo.toml b/extensions/wrap-key-to-file/Cargo.toml
new file mode 100644
index 0000000..4ecb4cd
--- /dev/null
+++ b/extensions/wrap-key-to-file/Cargo.toml
@@ -0,0 +1,14 @@
+# Copyright (C) Nitrokey GmbH
+# SPDX-License-Identifier: CC0-1.0
+
+[package]
+name = "trussed-wrap-key-to-file"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+
+[dependencies]
+serde.workspace = true
+trussed.workspace = true
diff --git a/src/wrap_key_to_file/mod.rs b/extensions/wrap-key-to-file/src/lib.rs
similarity index 53%
rename from src/wrap_key_to_file/mod.rs
rename to extensions/wrap-key-to-file/src/lib.rs
index 74acc77..8b49ada 100644
--- a/src/wrap_key_to_file/mod.rs
+++ b/extensions/wrap-key-to-file/src/lib.rs
@@ -1,23 +1,17 @@
 // Copyright (C) Nitrokey GmbH
 // SPDX-License-Identifier: Apache-2.0 or MIT
 
+#![no_std]
+#![warn(non_ascii_idents, trivial_casts, unused, unused_qualifications)]
+#![deny(unsafe_code)]
+
 use serde::{Deserialize, Serialize};
 use trussed::{
     client::ClientError,
-    config::MAX_SERIALIZED_KEY_LENGTH,
-    key::{self, Kind, Secrecy},
-    serde_extensions::{Extension, ExtensionClient, ExtensionImpl, ExtensionResult},
-    service::{Filestore, Keystore, ServiceResources},
-    types::{Bytes, CoreContext, GenericArray, KeyId, Location, Mechanism, PathBuf},
-    Error,
+    serde_extensions::{Extension, ExtensionClient, ExtensionResult},
+    types::{Bytes, KeyId, Location, Mechanism, PathBuf},
 };
 
-const NONCE_LEN: usize = 12;
-const KEY_LEN: usize = 32;
-const TAG_LEN: usize = 16;
-const KIND: Kind = Kind::Symmetric(KEY_LEN);
-const WRAPPED_TO_FILE_LEN: usize = MAX_SERIALIZED_KEY_LENGTH + NONCE_LEN + TAG_LEN;
-
 #[derive(Debug, Default)]
 pub struct WrapKeyToFileExtension;
 
@@ -101,7 +95,6 @@ pub mod reply {
     use super::*;
 
     #[derive(Debug, Deserialize, Serialize, Default)]
-    #[non_exhaustive]
     pub struct WrapKeyToFile {}
 
     impl TryFrom<WrapKeyToFileReply> for WrapKeyToFile {
@@ -147,121 +140,7 @@ impl Extension for WrapKeyToFileExtension {
     type Reply = WrapKeyToFileReply;
 }
 
-pub fn wrap_key_to_file(
-    keystore: &mut impl Keystore,
-    filestore: &mut impl Filestore,
-    request: &request::WrapKeyToFile,
-) -> Result<reply::WrapKeyToFile, Error> {
-    if !matches!(
-        request.mechanism,
-        trussed::types::Mechanism::Chacha8Poly1305
-    ) {
-        return Err(Error::MechanismInvalid);
-    }
-
-    use chacha20poly1305::aead::{AeadMutInPlace, KeyInit};
-    use chacha20poly1305::ChaCha8Poly1305;
-    use rand_core::RngCore as _;
-
-    let serialized_key = keystore.load_key(Secrecy::Secret, None, &request.key)?;
-
-    let mut data = Bytes::<WRAPPED_TO_FILE_LEN>::from_slice(&serialized_key.serialize()).unwrap();
-    let material_len = data.len();
-    data.resize_default(material_len + NONCE_LEN).unwrap();
-    let (material, nonce) = data.split_at_mut(material_len);
-    keystore.rng().fill_bytes(nonce);
-    let nonce = (&*nonce).try_into().unwrap();
-
-    let key =
keystore.load_key(Secrecy::Secret, Some(KIND), &request.wrapping_key)?; - let chachakey: [u8; KEY_LEN] = (&*key.material).try_into().unwrap(); - let mut aead = ChaCha8Poly1305::new(&GenericArray::clone_from_slice(&chachakey)); - let tag = aead - .encrypt_in_place_detached( - <&GenericArray<_, _> as From<&[u8; NONCE_LEN]>>::from(nonce), - &request.associated_data, - material, - ) - .unwrap(); - data.extend_from_slice(&tag).unwrap(); - filestore.write(&request.path, request.location, &data)?; - Ok(reply::WrapKeyToFile {}) -} - -pub fn unwrap_key_from_file( - keystore: &mut impl Keystore, - filestore: &mut impl Filestore, - request: &request::UnwrapKeyFromFile, -) -> Result { - if !matches!( - request.mechanism, - trussed::types::Mechanism::Chacha8Poly1305 - ) { - return Err(Error::MechanismInvalid); - } - - use chacha20poly1305::aead::{AeadMutInPlace, KeyInit}; - use chacha20poly1305::ChaCha8Poly1305; - let mut data: Bytes = - filestore.read(&request.path, request.file_location)?; - - let data_len = data.len(); - if data_len < TAG_LEN + NONCE_LEN { - error!("Attempt to unwrap file that doesn't contain a key"); - return Err(Error::InvalidSerializedKey); - } - let (tmp, tag) = data.split_at_mut(data_len - TAG_LEN); - let tmp_len = tmp.len(); - let (material, nonce) = tmp.split_at_mut(tmp_len - NONCE_LEN); - - // Coerce to array - let nonce = (&*nonce).try_into().unwrap(); - let tag = (&*tag).try_into().unwrap(); - - let key = keystore.load_key(key::Secrecy::Secret, Some(KIND), &request.key)?; - let chachakey: [u8; KEY_LEN] = (&*key.material).try_into().unwrap(); - let mut aead = ChaCha8Poly1305::new(&GenericArray::clone_from_slice(&chachakey)); - if aead - .decrypt_in_place_detached( - <&GenericArray<_, _> as From<&[u8; NONCE_LEN]>>::from(nonce), - &request.associated_data, - material, - <&GenericArray<_, _> as From<&[u8; TAG_LEN]>>::from(tag), - ) - .is_err() - { - return Ok(reply::UnwrapKeyFromFile { key: None }); - } - let key = key::Key::try_deserialize(material)?; - let info = key::Info { - flags: key.flags, - kind: key.kind, - }; - let key = keystore.store_key(request.key_location, Secrecy::Secret, info, &key.material)?; - Ok(reply::UnwrapKeyFromFile { key: Some(key) }) -} - -impl ExtensionImpl for super::StagingBackend { - fn extension_request( - &mut self, - core_ctx: &mut CoreContext, - _backend_ctx: &mut Self::Context, - request: &WrapKeyToFileRequest, - resources: &mut ServiceResources
<P>
, - ) -> Result { - let keystore = &mut resources.keystore(core_ctx.path.clone())?; - let filestore = &mut resources.filestore(core_ctx.path.clone()); - match request { - WrapKeyToFileRequest::WrapKeyToFile(request) => { - wrap_key_to_file(keystore, filestore, request).map(Into::into) - } - WrapKeyToFileRequest::UnwrapKeyFromFile(request) => { - unwrap_key_from_file(keystore, filestore, request).map(Into::into) - } - } - } -} - -type WrapKeyToFileResult<'a, R, C> = ExtensionResult<'a, WrapKeyToFileExtension, R, C>; +pub type WrapKeyToFileResult<'a, R, C> = ExtensionResult<'a, WrapKeyToFileExtension, R, C>; pub trait WrapKeyToFileClient: ExtensionClient { /// Wrap a key to a file diff --git a/src/chunked/mod.rs b/src/chunked/mod.rs new file mode 100644 index 0000000..2d3f5e7 --- /dev/null +++ b/src/chunked/mod.rs @@ -0,0 +1,408 @@ +// Copyright (C) Nitrokey GmbH +// SPDX-License-Identifier: Apache-2.0 or MIT + +mod store; +use store::OpenSeekFrom; + +#[cfg(feature = "encrypted-chunked")] +use chacha20poly1305::{ + aead::stream::{DecryptorLE31, EncryptorLE31, Nonce as StreamNonce, StreamLE31}, + ChaCha8Poly1305, KeyInit, +}; +use rand_core::RngCore; +use trussed::{ + config::MAX_MESSAGE_LENGTH, + key::{Kind, Secrecy}, + serde_extensions::ExtensionImpl, + service::{Filestore, Keystore, ServiceResources}, + store::Store, + types::{CoreContext, Location, Message, Path, PathBuf}, + Bytes, Error, +}; +use trussed_chunked::{ + reply, ChunkedExtension, ChunkedReply, ChunkedRequest, CHACHA8_STREAM_NONCE_LEN, +}; + +use crate::StagingContext; + +const POLY1305_TAG_LEN: usize = 16; +const CHACHA8_KEY_LEN: usize = 32; + +#[derive(Debug)] +pub struct ChunkedReadState { + pub path: PathBuf, + pub location: Location, + pub offset: usize, +} + +#[derive(Debug)] +pub struct ChunkedWriteState { + pub path: PathBuf, + pub location: Location, +} + +#[cfg(feature = "encrypted-chunked")] +pub struct EncryptedChunkedReadState { + pub path: PathBuf, + pub location: Location, + pub offset: usize, + pub decryptor: DecryptorLE31, +} + +#[cfg(feature = "encrypted-chunked")] +pub struct EncryptedChunkedWriteState { + pub path: PathBuf, + pub location: Location, + pub encryptor: EncryptorLE31, +} + +#[non_exhaustive] +pub enum ChunkedIoState { + Read(ChunkedReadState), + Write(ChunkedWriteState), + #[cfg(feature = "encrypted-chunked")] + EncryptedRead(EncryptedChunkedReadState), + #[cfg(feature = "encrypted-chunked")] + EncryptedWrite(EncryptedChunkedWriteState), +} + +impl ExtensionImpl for super::StagingBackend { + fn extension_request( + &mut self, + core_ctx: &mut CoreContext, + backend_ctx: &mut Self::Context, + request: &ChunkedRequest, + resources: &mut ServiceResources
<P>
, + ) -> Result { + let rng = &mut resources.rng()?; + let keystore = &mut resources.keystore(core_ctx.path.clone())?; + let filestore = &mut resources.filestore(core_ctx.path.clone()); + let client_id = &core_ctx.path; + let store = resources.platform_mut().store(); + match request { + ChunkedRequest::ReadChunk(_) => { + let read_state = match &mut backend_ctx.chunked_io_state { + Some(ChunkedIoState::Read(read_state)) => read_state, + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedRead(_)) => { + return read_encrypted_chunk(store, client_id, backend_ctx) + } + _ => return Err(Error::MechanismNotAvailable), + }; + let (data, len) = store::filestore_read_chunk( + store, + client_id, + &read_state.path, + read_state.location, + OpenSeekFrom::Start(read_state.offset as u32), + )?; + + read_state.offset += data.len(); + + Ok(reply::ReadChunk { data, len }.into()) + } + ChunkedRequest::StartChunkedRead(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let (data, len) = store::filestore_read_chunk( + store, + client_id, + &request.path, + request.location, + OpenSeekFrom::Start(0), + )?; + backend_ctx.chunked_io_state = Some(ChunkedIoState::Read(ChunkedReadState { + path: request.path.clone(), + location: request.location, + offset: data.len(), + })); + Ok(reply::StartChunkedRead { data, len }.into()) + } + ChunkedRequest::WriteChunk(request) => { + let is_last = !request.data.is_full(); + if is_last { + write_last_chunk(store, client_id, backend_ctx, &request.data)?; + } else { + write_chunk(store, client_id, backend_ctx, &request.data)?; + } + Ok(reply::WriteChunk {}.into()) + } + ChunkedRequest::AbortChunkedWrite(_request) => { + let Some(ChunkedIoState::Write(ref write_state)) = backend_ctx.chunked_io_state + else { + return Ok(reply::AbortChunkedWrite { aborted: false }.into()); + }; + let aborted = store::abort_chunked_write( + store, + client_id, + &write_state.path, + write_state.location, + ); + Ok(reply::AbortChunkedWrite { aborted }.into()) + } + ChunkedRequest::StartChunkedWrite(request) => { + backend_ctx.chunked_io_state = Some(ChunkedIoState::Write(ChunkedWriteState { + path: request.path.clone(), + location: request.location, + })); + store::start_chunked_write(store, client_id, &request.path, request.location, &[])?; + Ok(reply::StartChunkedWrite {}.into()) + } + ChunkedRequest::PartialReadFile(request) => { + let (data, file_length) = store::partial_read_file( + store, + client_id, + &request.path, + request.location, + request.offset, + request.length, + )?; + Ok(reply::PartialReadFile { data, file_length }.into()) + } + ChunkedRequest::AppendFile(request) => { + let file_length = store::append_file( + store, + client_id, + &request.path, + request.location, + &request.data, + )?; + Ok(reply::AppendFile { file_length }.into()) + } + #[cfg(feature = "encrypted-chunked")] + ChunkedRequest::StartEncryptedChunkedWrite(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let key = keystore.load_key( + Secrecy::Secret, + Some(Kind::Symmetric(CHACHA8_KEY_LEN)), + &request.key, + )?; + let nonce = request.nonce.map(|n| *n).unwrap_or_else(|| { + let mut nonce = [0; CHACHA8_STREAM_NONCE_LEN]; + rng.fill_bytes(&mut nonce); + nonce + }); + let nonce: &StreamNonce> = + (&nonce).into(); + let aead = ChaCha8Poly1305::new((&*key.material).into()); + let encryptor = EncryptorLE31::::from_aead(aead, nonce); + store::start_chunked_write( + store, + client_id, + &request.path, + request.location, + nonce, + )?; + 
backend_ctx.chunked_io_state = + Some(ChunkedIoState::EncryptedWrite(EncryptedChunkedWriteState { + path: request.path.clone(), + location: request.location, + encryptor, + })); + Ok(reply::StartEncryptedChunkedWrite {}.into()) + } + #[cfg(feature = "encrypted-chunked")] + ChunkedRequest::StartEncryptedChunkedRead(request) => { + clear_chunked_state(store, client_id, backend_ctx)?; + let key = keystore.load_key( + Secrecy::Secret, + Some(Kind::Symmetric(CHACHA8_KEY_LEN)), + &request.key, + )?; + let nonce: Bytes = + filestore.read(&request.path, request.location)?; + let nonce: &StreamNonce> = + (&**nonce).into(); + let aead = ChaCha8Poly1305::new((&*key.material).into()); + let decryptor = DecryptorLE31::::from_aead(aead, nonce); + backend_ctx.chunked_io_state = + Some(ChunkedIoState::EncryptedRead(EncryptedChunkedReadState { + path: request.path.clone(), + location: request.location, + decryptor, + offset: CHACHA8_STREAM_NONCE_LEN, + })); + Ok(reply::StartEncryptedChunkedRead {}.into()) + } + } + } +} + +fn clear_chunked_state( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, +) -> Result<(), Error> { + match ctx.chunked_io_state.take() { + Some(ChunkedIoState::Read(_)) | None => {} + Some(ChunkedIoState::Write(write_state)) => { + info!("Automatically cancelling write"); + store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedRead(_)) => {} + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(write_state)) => { + info!("Automatically cancelling encrypted write"); + store::abort_chunked_write(store, client_id, &write_state.path, write_state.location); + } + } + Ok(()) +} + +fn write_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, + data: &Message, +) -> Result<(), Error> { + match ctx.chunked_io_state { + Some(ChunkedIoState::Write(ref write_state)) => { + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + data, + )?; + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(ref mut write_state)) => { + let mut data = + Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); + write_state + .encryptor + .encrypt_next_in_place(write_state.path.as_ref().as_bytes(), &mut *data) + .map_err(|_err| { + error!("Failed to encrypt {:?}", _err); + Error::AeadError + })?; + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + &data, + )?; + } + _ => return Err(Error::MechanismNotAvailable), + } + Ok(()) +} + +fn write_last_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, + data: &Message, +) -> Result<(), Error> { + match ctx.chunked_io_state.take() { + Some(ChunkedIoState::Write(write_state)) => { + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + write_state.location, + data, + )?; + store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; + } + #[cfg(feature = "encrypted-chunked")] + Some(ChunkedIoState::EncryptedWrite(write_state)) => { + let mut data = + Bytes::<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>::from_slice(data).unwrap(); + write_state + .encryptor + .encrypt_last_in_place(&[write_state.location as u8], &mut *data) + .map_err(|_err| { + error!("Failed to encrypt {:?}", _err); + Error::AeadError + })?; + store::filestore_write_chunk( + store, + client_id, + &write_state.path, + 
write_state.location, + &data, + )?; + store::flush_chunks(store, client_id, &write_state.path, write_state.location)?; + } + _ => return Err(Error::MechanismNotAvailable), + } + + Ok(()) +} + +#[cfg(feature = "encrypted-chunked")] +fn read_encrypted_chunk( + store: impl Store, + client_id: &Path, + ctx: &mut StagingContext, +) -> Result { + let Some(ChunkedIoState::EncryptedRead(ref mut read_state)) = ctx.chunked_io_state else { + unreachable!( + "Read encrypted chunk can only be called in the context encrypted chunk reads" + ); + }; + let (mut data, len): (Bytes<{ MAX_MESSAGE_LENGTH + POLY1305_TAG_LEN }>, usize) = + store::filestore_read_chunk( + store, + client_id, + &read_state.path, + read_state.location, + OpenSeekFrom::Start(read_state.offset as _), + )?; + read_state.offset += data.len(); + + let is_last = !data.is_full(); + if is_last { + let Some(ChunkedIoState::EncryptedRead(read_state)) = ctx.chunked_io_state.take() else { + unreachable!(); + }; + + read_state + .decryptor + .decrypt_last_in_place(&[read_state.location as u8], &mut *data) + .map_err(|_err| { + error!("Failed to decrypt {:?}", _err); + Error::AeadError + })?; + let data = Bytes::from_slice(&data).expect("decryptor removes the tag"); + Ok(reply::ReadChunk { + data, + len: chunked_decrypted_len(len)?, + } + .into()) + } else { + read_state + .decryptor + .decrypt_next_in_place(read_state.path.as_ref().as_bytes(), &mut *data) + .map_err(|_err| { + error!("Failed to decrypt {:?}", _err); + Error::AeadError + })?; + let data = Bytes::from_slice(&data).expect("decryptor removes the tag"); + Ok(reply::ReadChunk { + data, + len: chunked_decrypted_len(len)?, + } + .into()) + } +} + +/// Calculate the decrypted length of a chunked encrypted file +fn chunked_decrypted_len(len: usize) -> Result { + let len = len.checked_sub(CHACHA8_STREAM_NONCE_LEN).ok_or_else(|| { + error!("File too small"); + Error::FilesystemReadFailure + })?; + const CHUNK_LEN: usize = POLY1305_TAG_LEN + MAX_MESSAGE_LENGTH; + let chunk_count = len / CHUNK_LEN; + let last_chunk_len = (len % CHUNK_LEN) + .checked_sub(POLY1305_TAG_LEN) + .ok_or_else(|| { + error!("Incorrect last chunk length"); + Error::FilesystemReadFailure + })?; + + Ok(chunk_count * MAX_MESSAGE_LENGTH + last_chunk_len) +} diff --git a/src/streaming/store.rs b/src/chunked/store.rs similarity index 100% rename from src/streaming/store.rs rename to src/chunked/store.rs diff --git a/src/lib.rs b/src/lib.rs index 023ab9e..0f85088 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,26 +13,28 @@ use trussed::backend::Backend; pub mod virt; #[cfg(feature = "wrap-key-to-file")] -pub mod wrap_key_to_file; +mod wrap_key_to_file; #[cfg(feature = "chunked")] -pub mod streaming; +mod chunked; #[cfg(feature = "manage")] -pub mod manage; +mod manage; +#[cfg(feature = "manage")] +pub use manage::State as ManageState; #[derive(Clone, Debug, Default)] #[non_exhaustive] pub struct StagingBackend { #[cfg(feature = "manage")] - pub manage: manage::State, + pub manage: ManageState, } impl StagingBackend { pub fn new() -> Self { Self { #[cfg(feature = "manage")] - manage: manage::State::default(), + manage: Default::default(), } } } @@ -41,7 +43,7 @@ impl StagingBackend { #[non_exhaustive] pub struct StagingContext { #[cfg(feature = "chunked")] - chunked_io_state: Option, + chunked_io_state: Option, } impl Backend for StagingBackend { diff --git a/src/manage.rs b/src/manage.rs index 72c30bd..7c45e20 100644 --- a/src/manage.rs +++ b/src/manage.rs @@ -1,153 +1,20 @@ // Copyright (C) Nitrokey GmbH // 
SPDX-License-Identifier: Apache-2.0 or MIT -use littlefs2::{ - fs::DirEntry, - path, - path::{Path, PathBuf}, -}; -use serde::{Deserialize, Serialize}; +use littlefs2::{fs::DirEntry, path, path::Path}; use trussed::{ - serde_extensions::{Extension, ExtensionClient, ExtensionImpl, ExtensionResult}, + serde_extensions::{Extension, ExtensionImpl}, store::Store, types::Location, Error, }; +use trussed_manage::{ + FactoryResetClientReply, FactoryResetClientRequest, FactoryResetDeviceReply, + FactoryResetDeviceRequest, ManageExtension, ManageReply, ManageRequest, +}; use crate::StagingBackend; -pub struct ManageExtension; - -/// Factory reset the entire device -/// -/// This will reset all filesystems -#[derive(Debug, Deserialize, Serialize, Copy, Clone)] -pub struct FactoryResetDeviceRequest; - -/// Factory reset a specific application -/// -/// This will reset all data for a specific client -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct FactoryResetClientRequest { - pub client: PathBuf, -} - -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Deserialize, Serialize, Clone)] -pub enum ManageRequest { - FactoryResetDevice(FactoryResetDeviceRequest), - FactoryResetClient(FactoryResetClientRequest), -} - -impl From for ManageRequest { - fn from(value: FactoryResetClientRequest) -> Self { - Self::FactoryResetClient(value) - } -} - -impl TryFrom for FactoryResetClientRequest { - type Error = Error; - fn try_from(value: ManageRequest) -> Result { - match value { - ManageRequest::FactoryResetClient(v) => Ok(v), - _ => Err(Error::InternalError), - } - } -} - -impl From for ManageRequest { - fn from(value: FactoryResetDeviceRequest) -> Self { - Self::FactoryResetDevice(value) - } -} - -impl TryFrom for FactoryResetDeviceRequest { - type Error = Error; - fn try_from(value: ManageRequest) -> Result { - match value { - ManageRequest::FactoryResetDevice(v) => Ok(v), - _ => Err(Error::InternalError), - } - } -} - -/// Factory reset the entire device -/// -/// This will reset all filesystems -#[derive(Debug, Deserialize, Serialize, Copy, Clone)] -pub struct FactoryResetDeviceReply; - -/// Factory reset a specific application -/// -/// This will reset all data for a specific client -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct FactoryResetClientReply; - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub enum ManageReply { - FactoryResetDevice(FactoryResetDeviceReply), - FactoryResetClient(FactoryResetClientReply), -} - -impl From for ManageReply { - fn from(value: FactoryResetClientReply) -> Self { - Self::FactoryResetClient(value) - } -} - -impl TryFrom for FactoryResetClientReply { - type Error = Error; - fn try_from(value: ManageReply) -> Result { - match value { - ManageReply::FactoryResetClient(v) => Ok(v), - _ => Err(Error::InternalError), - } - } -} - -impl From for ManageReply { - fn from(value: FactoryResetDeviceReply) -> Self { - Self::FactoryResetDevice(value) - } -} - -impl TryFrom for FactoryResetDeviceReply { - type Error = Error; - fn try_from(value: ManageReply) -> Result { - match value { - ManageReply::FactoryResetDevice(v) => Ok(v), - _ => Err(Error::InternalError), - } - } -} - -impl Extension for ManageExtension { - type Request = ManageRequest; - type Reply = ManageReply; -} - -type ManageResult<'a, R, C> = ExtensionResult<'a, ManageExtension, R, C>; - -pub trait ManageClient: ExtensionClient { - /// Factory reset the entire device - /// - /// This will reset all filesystems - fn factory_reset_device(&mut self) -> ManageResult<'_, 
FactoryResetDeviceReply, Self> { - self.extension(FactoryResetDeviceRequest) - } - - /// Factory reset the entire client - /// - fn factory_reset_client( - &mut self, - client: &Path, - ) -> ManageResult<'_, FactoryResetClientReply, Self> { - self.extension(FactoryResetClientRequest { - client: client.into(), - }) - } -} - #[derive(Debug, Clone)] pub struct State { /// Function called during a factory reset (of a client or the whole device) @@ -180,8 +47,6 @@ fn callback( move |f| !should_preserve_file(f.path(), location) } -impl> ManageClient for C {} - impl ExtensionImpl for StagingBackend { fn extension_request( &mut self, diff --git a/src/virt.rs b/src/virt.rs index 6eb2f7a..b7f26d9 100644 --- a/src/virt.rs +++ b/src/virt.rs @@ -3,18 +3,17 @@ //! Wrapper around [`trussed::virt`][] that provides clients with both the core backend and the [`StagingBackend`] backend. -#[cfg(feature = "wrap-key-to-file")] -use crate::wrap_key_to_file::WrapKeyToFileExtension; - -use crate::{StagingBackend, StagingContext}; - #[cfg(feature = "manage")] -use crate::manage::ManageExtension; -#[cfg(feature = "chunked")] -use crate::streaming::ChunkedExtension; +use trussed::types::{Location, Path}; +#[cfg(feature = "chunked")] +use trussed_chunked::ChunkedExtension; #[cfg(feature = "manage")] -use trussed::types::{Location, Path}; +use trussed_manage::ManageExtension; +#[cfg(feature = "wrap-key-to-file")] +use trussed_wrap_key_to_file::WrapKeyToFileExtension; + +use crate::{StagingBackend, StagingContext}; #[derive(Default, Debug)] pub struct Dispatcher { diff --git a/src/wrap_key_to_file.rs b/src/wrap_key_to_file.rs new file mode 100644 index 0000000..d8d9307 --- /dev/null +++ b/src/wrap_key_to_file.rs @@ -0,0 +1,134 @@ +// Copyright (C) Nitrokey GmbH +// SPDX-License-Identifier: Apache-2.0 or MIT + +use trussed::{ + config::MAX_SERIALIZED_KEY_LENGTH, + key::{self, Kind, Secrecy}, + serde_extensions::ExtensionImpl, + service::{Filestore, Keystore, ServiceResources}, + types::{Bytes, CoreContext, GenericArray}, + Error, +}; +use trussed_wrap_key_to_file::{ + reply, request, WrapKeyToFileExtension, WrapKeyToFileReply, WrapKeyToFileRequest, +}; + +const NONCE_LEN: usize = 12; +const KEY_LEN: usize = 32; +const TAG_LEN: usize = 16; +const KIND: Kind = Kind::Symmetric(KEY_LEN); +const WRAPPED_TO_FILE_LEN: usize = MAX_SERIALIZED_KEY_LENGTH + NONCE_LEN + TAG_LEN; + +fn wrap_key_to_file( + keystore: &mut impl Keystore, + filestore: &mut impl Filestore, + request: &request::WrapKeyToFile, +) -> Result { + if !matches!( + request.mechanism, + trussed::types::Mechanism::Chacha8Poly1305 + ) { + return Err(Error::MechanismInvalid); + } + + use chacha20poly1305::aead::{AeadMutInPlace, KeyInit}; + use chacha20poly1305::ChaCha8Poly1305; + use rand_core::RngCore as _; + + let serialized_key = keystore.load_key(Secrecy::Secret, None, &request.key)?; + + let mut data = Bytes::::from_slice(&serialized_key.serialize()).unwrap(); + let material_len = data.len(); + data.resize_default(material_len + NONCE_LEN).unwrap(); + let (material, nonce) = data.split_at_mut(material_len); + keystore.rng().fill_bytes(nonce); + let nonce = (&*nonce).try_into().unwrap(); + + let key = keystore.load_key(Secrecy::Secret, Some(KIND), &request.wrapping_key)?; + let chachakey: [u8; KEY_LEN] = (&*key.material).try_into().unwrap(); + let mut aead = ChaCha8Poly1305::new(&GenericArray::clone_from_slice(&chachakey)); + let tag = aead + .encrypt_in_place_detached( + <&GenericArray<_, _> as From<&[u8; NONCE_LEN]>>::from(nonce), + &request.associated_data, + 
material, + ) + .unwrap(); + data.extend_from_slice(&tag).unwrap(); + filestore.write(&request.path, request.location, &data)?; + Ok(reply::WrapKeyToFile {}) +} + +fn unwrap_key_from_file( + keystore: &mut impl Keystore, + filestore: &mut impl Filestore, + request: &request::UnwrapKeyFromFile, +) -> Result { + if !matches!( + request.mechanism, + trussed::types::Mechanism::Chacha8Poly1305 + ) { + return Err(Error::MechanismInvalid); + } + + use chacha20poly1305::aead::{AeadMutInPlace, KeyInit}; + use chacha20poly1305::ChaCha8Poly1305; + let mut data: Bytes = + filestore.read(&request.path, request.file_location)?; + + let data_len = data.len(); + if data_len < TAG_LEN + NONCE_LEN { + error!("Attempt to unwrap file that doesn't contain a key"); + return Err(Error::InvalidSerializedKey); + } + let (tmp, tag) = data.split_at_mut(data_len - TAG_LEN); + let tmp_len = tmp.len(); + let (material, nonce) = tmp.split_at_mut(tmp_len - NONCE_LEN); + + // Coerce to array + let nonce = (&*nonce).try_into().unwrap(); + let tag = (&*tag).try_into().unwrap(); + + let key = keystore.load_key(key::Secrecy::Secret, Some(KIND), &request.key)?; + let chachakey: [u8; KEY_LEN] = (&*key.material).try_into().unwrap(); + let mut aead = ChaCha8Poly1305::new(&GenericArray::clone_from_slice(&chachakey)); + if aead + .decrypt_in_place_detached( + <&GenericArray<_, _> as From<&[u8; NONCE_LEN]>>::from(nonce), + &request.associated_data, + material, + <&GenericArray<_, _> as From<&[u8; TAG_LEN]>>::from(tag), + ) + .is_err() + { + return Ok(reply::UnwrapKeyFromFile { key: None }); + } + let key = key::Key::try_deserialize(material)?; + let info = key::Info { + flags: key.flags, + kind: key.kind, + }; + let key = keystore.store_key(request.key_location, Secrecy::Secret, info, &key.material)?; + Ok(reply::UnwrapKeyFromFile { key: Some(key) }) +} + +impl ExtensionImpl for super::StagingBackend { + fn extension_request( + &mut self, + core_ctx: &mut CoreContext, + _backend_ctx: &mut Self::Context, + request: &WrapKeyToFileRequest, + resources: &mut ServiceResources
<P>
, + ) -> Result { + let keystore = &mut resources.keystore(core_ctx.path.clone())?; + let filestore = &mut resources.filestore(core_ctx.path.clone()); + match request { + WrapKeyToFileRequest::WrapKeyToFile(request) => { + wrap_key_to_file(keystore, filestore, request).map(Into::into) + } + WrapKeyToFileRequest::UnwrapKeyFromFile(request) => { + unwrap_key_from_file(keystore, filestore, request).map(Into::into) + } + } + } +} diff --git a/tests/chunked.rs b/tests/chunked.rs index 21bd712..7b8c753 100644 --- a/tests/chunked.rs +++ b/tests/chunked.rs @@ -5,10 +5,9 @@ use littlefs2::path::PathBuf; use trussed::{client::FilesystemClient, syscall, try_syscall, types::Location, Bytes}; -use trussed_staging::{ - streaming::{utils, ChunkedClient}, - virt::with_ram_client, -}; +use trussed_chunked::{utils, ChunkedClient}; +use trussed_staging::virt::with_ram_client; + fn test_write_all(location: Location) { with_ram_client("test chunked", |mut client| { let path = PathBuf::from("foo"); diff --git a/tests/encrypted-chunked.rs b/tests/encrypted-chunked.rs index cd351fc..a7df91f 100644 --- a/tests/encrypted-chunked.rs +++ b/tests/encrypted-chunked.rs @@ -9,13 +9,11 @@ use trussed::{ client::CryptoClient, client::FilesystemClient, syscall, try_syscall, types::Location, Bytes, Error, }; -use trussed_staging::{ - streaming::{ - utils::{self, EncryptionData}, - ChunkedClient, - }, - virt::with_ram_client, +use trussed_chunked::{ + utils::{self, EncryptionData}, + ChunkedClient, }; +use trussed_staging::virt::with_ram_client; fn test_write_all(location: Location) { with_ram_client("test chunked", |mut client| { diff --git a/tests/manage.rs b/tests/manage.rs index 4808a84..b29374a 100644 --- a/tests/manage.rs +++ b/tests/manage.rs @@ -7,7 +7,8 @@ use littlefs2::path; use trussed::client::FilesystemClient; use trussed::syscall; use trussed::types::{Bytes, Location, Path}; -use trussed_staging::{manage::ManageClient, virt::with_ram_clients_and_preserve}; +use trussed_manage::ManageClient; +use trussed_staging::virt::with_ram_clients_and_preserve; fn should_preserve(path: &Path, location: Location) -> bool { (location == Location::Internal && path == path!("/client1/dat/to_save_internal")) diff --git a/tests/wrap_key_to_file.rs b/tests/wrap_key_to_file.rs index 0bed78b..84482b7 100644 --- a/tests/wrap_key_to_file.rs +++ b/tests/wrap_key_to_file.rs @@ -12,7 +12,7 @@ use trussed::types::{ use trussed_staging::virt::with_ram_client; -use trussed_staging::wrap_key_to_file::WrapKeyToFileClient; +use trussed_wrap_key_to_file::WrapKeyToFileClient; fn assert_key_eq(key1: KeyId, key2: KeyId, client: &mut impl trussed::Client) { let derivative1 = syscall!(client.sign(