diff --git a/.cargo/config.toml b/.cargo/config.toml index c84e30e..597ed0f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -8,7 +8,7 @@ build-std = ["core", "compiler_builtins", "alloc"] # but don't change the README instructions as someone might not # have .cargo/ [build] -target = "svsm-target.json" +target = "x86_64-unknown-none.json" [target.svsm-target] rustflags = [ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index dc42bb3..28cb8e4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -12,11 +12,7 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v2 - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - name: Prepare tests - run: rustup component add rust-src --toolchain nightly + run: make prereq RUST_INSTALLER_ARGS='-y' - name: Run tests - run: cargo test --target=x86_64-unknown-linux-gnu -Z build-std + run: make test diff --git a/.gitignore b/.gitignore index bbb5778..ad35d12 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,4 @@ dkms.conf # Others *.lds .prereq +bindgen_out.rs diff --git a/Cargo.lock b/Cargo.lock index a2f12fc..e3c521d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,9 +10,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bit_field" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" [[package]] name = "bitflags" @@ -20,12 +20,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "cty" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" - [[package]] name = "lazy_static" version = "1.4.0" @@ -39,7 +33,6 @@ dependencies = [ name = "linux_svsm" version = "0.1.0" dependencies = [ - "cty", "lazy_static", "memchr", "memoffset", @@ -65,15 +58,15 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.7" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35" [[package]] name = "rustversion" -version = "1.0.8" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" +checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" [[package]] name = "spin" @@ -83,15 +76,15 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "uuid" -version = "1.1.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" [[package]] name = "volatile" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ca98349dda8a60ae74e04fd90c7fb4d6a4fbe01e6d3be095478aa0b76f6c0c" +checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793" [[package]] name = "x86_64" 
diff --git a/Cargo.toml b/Cargo.toml index 84677c1..95ed85d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,6 @@ memoffset = "0.6" paste = "1.0" memchr = { version = "2", default-features = false } uuid = { version = "1", default-features = false } -cty = "0.2.2" [dependencies.lazy_static] version = "1.0" diff --git a/Makefile b/Makefile index f778d64..c146791 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,8 @@ GCC = gcc SHELL := /bin/bash +ROOT_DIR := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))) + A_FLAGS := -D__ASSEMBLY__ C_FLAGS := -g -O2 @@ -17,7 +19,7 @@ LD_FLAGS += -nostdlib LD_FLAGS += -Wl,-Tsrc/start/svsm.lds -Wl,--build-id=none TARGET_DIR := target -TARGET := $(TARGET_DIR)/svsm-target/debug +TARGET := $(TARGET_DIR)/x86_64-unknown-none/debug OBJS := src/start/start.o OBJS += $(TARGET)/liblinux_svsm.a @@ -51,14 +53,25 @@ external/openssl/libcrypto.a: libcrypto libcrypto: external/openssl/Makefile libcrt $(MAKE) -C external/openssl -j$$(nproc) +bindgen_out.rs: libcrypto include/bindings.h + bindgen \ + include/bindings.h \ + -o bindgen_out.rs \ + --use-core \ + --rustfmt-configuration-file $(ROOT_DIR)/.rustfmt.toml \ + --rust-target "nightly" -- \ + -DOPENSSL_RAND_SEED_NONE \ + -I$(ROOT_DIR)/external/libcrt/include \ + -I$(ROOT_DIR)/external/openssl/include + svsm.bin: svsm.bin.elf objcopy -g -O binary $< $@ # "-Wl,-u,malloc" prevents the linker from removing the wrapper.rs symbols svsm.bin.elf: $(EXT_LIBS) $(OBJS) src/start/svsm.lds - $(GCC) $(LD_FLAGS) -o $@ $(OBJS) -Wl,-u,malloc -Wl,--start-group $(EXT_LIBS) -Wl,--end-group + $(GCC) $(LD_FLAGS) -o $@ $(OBJS) -Wl,-u,malloc -Wl,-u,abort -Wl,--start-group $(EXT_LIBS) -Wl,--end-group -%.a: src/*.rs src/cpu/*.rs src/mem/*.rs src/protocols/*.rs src/util/*.rs +%.a: src/*.rs src/cpu/*.rs src/mem/*.rs src/protocols/*.rs src/util/*.rs bindgen_out.rs @xargo build --features $(FEATURES) %.o: %.S src/start/svsm.h @@ -67,19 +80,20 @@ svsm.bin.elf: $(EXT_LIBS) $(OBJS) src/start/svsm.lds %.lds: %.lds.S src/start/svsm.h $(GCC) $(A_FLAGS) $(LDS_FLAGS) -E -P -o $@ $< -test: +test: bindgen_out.rs cargo test --features $(FEATURES) --target=x86_64-unknown-linux-gnu -Z build-std prereq: .prereq .prereq: - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- $(RUST_INSTALLER_ARGS) source $(HOME)/.cargo/env echo "source $(HOME)/.cargo/env" >> ~/.bashrc rustup component add rust-src rustup component add llvm-tools-preview cargo install xargo cargo install bootimage + cargo install bindgen-cli touch .prereq external/openssl/Makefile: @@ -147,6 +161,7 @@ clean: clean_all: clean $(MAKE) -C external/libcrt clean $(MAKE) -C external/openssl clean + rm -f bindgen_out.rs superclean: clean_all rm -f .prereq diff --git a/include/bindings.h b/include/bindings.h new file mode 100644 index 0000000..52850bf --- /dev/null +++ b/include/bindings.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: MIT */ + +/* + * Attestation report requirement. SNP_GUEST_REQUEST messages have to be + * encrypted using AES-256-GCM, which is accessible through the EVP interface. + */ +#include <openssl/evp.h> diff --git a/src/bindings.rs b/src/bindings.rs new file mode 100644 index 0000000..51d1f44 --- /dev/null +++ b/src/bindings.rs @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: MIT */ + +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] + +// Add bindgen-generated FFI bindings and test cases.
+include!(concat!(env!("CARGO_MANIFEST_DIR"), "/bindgen_out.rs")); diff --git a/src/bios.rs b/src/bios.rs index 4a6f042..dfdff5f 100644 --- a/src/bios.rs +++ b/src/bios.rs @@ -9,6 +9,7 @@ use crate::cpu::smp_prepare_bios_vmpl; use crate::cpu::smp_run_bios_vmpl; use crate::cpu::vc::*; +use crate::mem::SnpSecrets; use crate::*; use core::cmp::min; @@ -109,47 +110,6 @@ impl SnpSection { } } -#[derive(Clone, Copy, Debug)] -#[repr(C, packed)] -struct SnpSecrets { - version: u32, - flags: u32, - fms: u32, - reserved1: [u8; 4], - - gosvw: [u8; 16], - - vmpck0: [u8; 32], - vmpck1: [u8; 32], - vmpck2: [u8; 32], - vmpck3: [u8; 32], - - os_reserved: [u8; 96], - - reserved2: [u8; 64], - - // SVSM fields start at offset 0x140 into the secrets page - svsm_base: u64, - svsm_size: u64, - svsm_caa: u64, - svsm_max_version: u32, - svsm_guest_vmpl: u8, - reserved3: [u8; 3], -} - -#[allow(dead_code)] -impl SnpSecrets { - pub fn clear_vmpck0(&mut self) { - self.vmpck0.iter_mut().for_each(|e| *e = 0); - } - - funcs!(svsm_base, u64); - funcs!(svsm_size, u64); - funcs!(svsm_caa, u64); - funcs!(svsm_max_version, u32); - funcs!(svsm_guest_vmpl, u8); -} - /// 96b582de-1fb2-45f7-baea-a366c55a082d const OVMF_TABLE_GUID: Uuid = uuid!("96b582de-1fb2-45f7-baea-a366c55a082d"); /// dc886566-984a-4798-A75e-5585a7bf67cc diff --git a/src/cpu/smp.rs b/src/cpu/smp.rs index 52c360e..13ab678 100644 --- a/src/cpu/smp.rs +++ b/src/cpu/smp.rs @@ -64,8 +64,8 @@ const SVSM_X87_FTW: u16 = 0x5555; /// 0x40 const SVSM_X87_FCW: u16 = 0x40; -/// 5 -const SVSM_STACK_PAGES: u64 = 5; /* 4 stack pages and one guard page */ +/// 17 +const SVSM_STACK_PAGES: u64 = 17; /* 16 stack pages and one guard page */ static mut AP_SYNC: u8 = 0; /// 1 diff --git a/src/cpu/vc.rs b/src/cpu/vc.rs index 22aba95..78fe52c 100644 --- a/src/cpu/vc.rs +++ b/src/cpu/vc.rs @@ -16,6 +16,7 @@ use crate::globals::*; use crate::mem::ghcb::Ghcb; use crate::mem::ghcb::*; use crate::mem::*; +use crate::psp::guest_request_cmd::{SnpGuestRequestCmd, SNP_GUEST_REQ_INVALID_LEN}; use crate::util::util::memset; use crate::*; @@ -115,6 +116,10 @@ const GHCB_NAE_CPUID: u64 = 0x72; const GHCB_NAE_IOIO: u64 = 0x7b; /// 0x80000010 const GHCB_NAE_PSC: u64 = 0x80000010; +/// 0x80000011 +const GHCB_NAE_SNP_GUEST_REQUEST: u64 = 0x80000011; +/// 0x80000012 +const GHCB_NAE_SNP_EXT_GUEST_REQUEST: u64 = 0x80000012; /// 0x80000013 const GHCB_NAE_SNP_AP_CREATION: u64 = 0x80000013; /// 1 @@ -376,6 +381,47 @@ pub fn vc_ap_create(vmsa_va: VirtAddr, apic_id: u32) { } } +pub fn vc_snp_guest_request( + extended: bool, + psp_rc: &mut u64, + cmd: &mut SnpGuestRequestCmd, +) -> Result<(), ()> { + let ghcb: *mut Ghcb = vc_get_ghcb(); + let info1: u64 = pgtable_va_to_pa((*cmd).req_shared_page()).as_u64(); + let info2: u64 = pgtable_va_to_pa((*cmd).resp_shared_page()).as_u64(); + + let exit_code: u64 = if extended { + GHCB_NAE_SNP_EXT_GUEST_REQUEST + } else { + GHCB_NAE_SNP_GUEST_REQUEST + }; + + unsafe { + if extended { + let data_gpa: u64 = pgtable_va_to_pa((*cmd).data_gva()).as_u64(); + (*ghcb).set_rax(data_gpa); + (*ghcb).set_rbx((*cmd).data_npages() as u64); + } + + vc_perform_vmgexit(ghcb, exit_code, info1, info2); + + if !(*ghcb).is_sw_exit_info_2_valid() { + return Err(()); + } + + *psp_rc = (*ghcb).sw_exit_info_2(); + + // The number of expected pages are returned in RBX + if extended && *psp_rc == SNP_GUEST_REQ_INVALID_LEN { + (*cmd).set_data_npages((*ghcb).rbx() as usize); + } + + (*ghcb).clear(); + } + + Ok(()) +} + pub fn vc_get_apic_ids(bsp_apic_id: u32) -> Vec { let mut apic_ids: Vec; let 
ghcb: *mut Ghcb = vc_get_ghcb(); diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs new file mode 100644 index 0000000..582a08c --- /dev/null +++ b/src/crypto/mod.rs @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +/// SSL +#[cfg_attr(test, path = "nossl.rs")] +#[cfg_attr(not(test), path = "openssl.rs")] +pub mod ssl; diff --git a/src/crypto/nossl.rs b/src/crypto/nossl.rs new file mode 100644 index 0000000..85f8163 --- /dev/null +++ b/src/crypto/nossl.rs @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +use core::ffi::c_int; +use x86_64::VirtAddr; + +/// Create a new SSL context +pub fn new_ssl_ctx(_ctx: *mut VirtAddr) -> *mut VirtAddr { + VirtAddr::zero().as_mut_ptr() +} + +/// Initialize SSL context for AES-GCM-256 for either +/// encryption or decryption +pub fn init_aes_256_gcm_ctx( + _ssl_ctx: *mut VirtAddr, + _is_for_encryption: bool, + _key: VirtAddr, + _key_len: c_int, + _iv: VirtAddr, + _iv_len: c_int, +) -> Result<(), ()> { + Ok(()) +} + +/// Encrypt plaintext using AES-256-GCM +pub fn aes_256_gcm_encrypt( + _ssl_ctx: *mut VirtAddr, + _plaintext: VirtAddr, + _plaintext_len: c_int, + _aad: VirtAddr, + _aad_len: c_int, + _ciphertext: VirtAddr, + _ciphertext_len: *mut c_int, + _authtag: VirtAddr, + _authtag_len: c_int, +) -> Result<(), ()> { + Ok(()) +} + +/// Decrypt ciphertext using AES-GCM-256 +pub fn aes_256_gcm_decrypt( + _ssl_ctx: *mut VirtAddr, + _plaintext: VirtAddr, + _plaintext_len: *mut c_int, + _aad: VirtAddr, + _aad_len: c_int, + _ciphertext: VirtAddr, + _ciphertext_len: c_int, + _authtag: VirtAddr, + _authtag_len: c_int, +) -> Result<(), ()> { + Ok(()) +} diff --git a/src/crypto/openssl.rs b/src/crypto/openssl.rs new file mode 100644 index 0000000..ef07964 --- /dev/null +++ b/src/crypto/openssl.rs @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +use core::ffi::{c_int, c_uchar, c_void}; +use core::ptr; +use x86_64::addr::VirtAddr; + +use crate::{ + bindings::{ + EVP_CIPHER_CTX_ctrl, EVP_CIPHER_CTX_free, EVP_CIPHER_CTX_new, + EVP_CIPHER_CTX_set_key_length, EVP_DecryptFinal_ex, EVP_DecryptInit_ex, EVP_DecryptUpdate, + EVP_EncryptFinal_ex, EVP_EncryptInit_ex, EVP_EncryptUpdate, EVP_aes_256_gcm, + EVP_CIPHER_CTX, EVP_CTRL_GCM_GET_TAG, EVP_CTRL_GCM_SET_IVLEN, EVP_CTRL_GCM_SET_TAG, + }, + prints, +}; + +pub fn new_ssl_ctx(ctx: *mut VirtAddr) -> *mut VirtAddr { + if !ctx.is_null() { + unsafe { EVP_CIPHER_CTX_free(ctx as *mut EVP_CIPHER_CTX) }; + } + VirtAddr::from_ptr(unsafe { EVP_CIPHER_CTX_new() }).as_mut_ptr() +} + +pub fn init_aes_256_gcm_ctx( + ssl_ctx: *mut VirtAddr, + is_encryption: bool, + key: VirtAddr, + key_len: c_int, + iv: VirtAddr, + iv_len: c_int, +) -> Result<(), ()> { + if ssl_ctx.is_null() { + prints!("ERR: SSL context can't be null\n"); + return Err(()); + } + + let ctx: *mut EVP_CIPHER_CTX = ssl_ctx as *mut EVP_CIPHER_CTX; + + if is_encryption { + // Set encrypt operation + let ret: c_int = unsafe { + EVP_EncryptInit_ex( + ctx, + EVP_aes_256_gcm(), + ptr::null_mut(), + ptr::null(), + ptr::null(), + ) + }; + if ret != 1 { + prints!("ERR: EVP_EncryptInit_ex failed, rc={}\n", ret); + return Err(()); + } + } else { + // Set decrypt operation + let ret: c_int = unsafe { + EVP_DecryptInit_ex( + ctx, + EVP_aes_256_gcm(), + ptr::null_mut(), + ptr::null(), + ptr::null(), + ) + }; + if ret != 1 { + prints!("ERR: 
EVP_DecryptInit_ex failed, rc={}\n", ret); + return Err(()); + } + } + + // Set key size + let mut ret: c_int = unsafe { EVP_CIPHER_CTX_set_key_length(ctx, key_len) }; + if ret != 1 { + prints!( + "ERR: Failed to set the key length for encryption ({})\n", + ret + ); + return Err(()); + } + + // Set IV size + let gcm_set_ivlen_cint: c_int = match c_int::try_from(EVP_CTRL_GCM_SET_IVLEN) { + Ok(c) => c, + Err(_) => { + prints!("ERR: Operation SET_IVLEN too big for c_int\n"); + return Err(()); + } + }; + ret = unsafe { EVP_CIPHER_CTX_ctrl(ctx, gcm_set_ivlen_cint, iv_len, ptr::null_mut()) }; + if ret != 1 { + prints!("ERR: EVP_CIPHER_CTX_ctrl failed ({})\n", ret); + return Err(()); + } + + // Set key and IV + if is_encryption { + ret = unsafe { + EVP_EncryptInit_ex(ctx, ptr::null(), ptr::null_mut(), key.as_ptr(), iv.as_ptr()) + }; + if ret != 1 { + prints!("ERR: EVP_EncryptInit_ex failed ({})\n", ret); + return Err(()); + } + } else { + ret = unsafe { + EVP_DecryptInit_ex(ctx, ptr::null(), ptr::null_mut(), key.as_ptr(), iv.as_ptr()) + }; + if ret != 1 { + prints!("ERR: EVP_DecryptInit_ex failed, rc={}\n", ret); + return Err(()); + } + } + + Ok(()) +} + +pub fn aes_256_gcm_encrypt( + ssl_ctx: *mut VirtAddr, + plaintext: VirtAddr, + plaintext_len: c_int, + aad: VirtAddr, + aad_len: c_int, + ciphertext: VirtAddr, + ciphertext_len: *mut c_int, + authtag: VirtAddr, + authtag_len: c_int, +) -> Result<(), ()> { + if ssl_ctx.is_null() { + prints!("ERR: SSL context can't be null for encryption\n"); + return Err(()); + } + if plaintext_len > unsafe { *ciphertext_len } { + prints!( + "ERR: plaintext can't be bigger than the ciphertext buffer ({} > {})", + plaintext_len, + { unsafe { *ciphertext_len } } + ); + return Err(()); + } + + let ctx: *mut EVP_CIPHER_CTX = ssl_ctx as *mut EVP_CIPHER_CTX; + + let mut len: c_int = 0; + + // Provide Additional Authenticated Data (AAD) + let mut ret: i32 = unsafe { + EVP_EncryptUpdate( + ctx, + ptr::null_mut(), + ptr::addr_of_mut!(len), + aad.as_ptr(), + aad_len, + ) + }; + if ret != 1 { + prints!("ERR: Failed to provide AAD for encryption ({})\n", ret); + return Err(()); + } + + // Provide plaintext + ret = unsafe { + EVP_EncryptUpdate( + ctx, + ciphertext.as_mut_ptr(), + ptr::addr_of_mut!(len), + plaintext.as_ptr(), + plaintext_len, + ) + }; + if ret != 1 { + prints!( + "ERR: Failed to provide the message to be encrypted ({})\n", + ret + ); + return Err(()); + } + + unsafe { + *ciphertext_len = len; + } + + // Finalize encryption + let count: isize = match isize::try_from(len) { + Ok(c) => c, + Err(_) => { + prints!("ERR: ciphertext len too big, {} bytes\n", len); + return Err(()); + } + }; + let ciphertext_offset_ptr: *mut c_uchar = + unsafe { ciphertext.as_mut_ptr::().offset(count) }; + ret = unsafe { EVP_EncryptFinal_ex(ctx, ciphertext_offset_ptr, ptr::addr_of_mut!(len)) }; + if ret != 1 { + prints!("ERR: Failed to finalise the encryption ({})\n", ret); + return Err(()); + } + + // Get auth tag + let get_tag_value: c_int = match c_int::try_from(EVP_CTRL_GCM_GET_TAG) { + Ok(g) => g, + Err(_) => { + prints!("ERR: GCM_GET_TAG too big, value {}\n", EVP_CTRL_GCM_GET_TAG); + return Err(()); + } + }; + ret = unsafe { + EVP_CIPHER_CTX_ctrl( + ctx, + get_tag_value, + authtag_len, + authtag.as_mut_ptr() as *mut c_void, + ) + }; + if ret != 1 { + prints!("ERR: Failed to get the tag in the encryption ({})\n", ret); + return Err(()); + } + + Ok(()) +} + +pub fn aes_256_gcm_decrypt( + ssl_ctx: *mut VirtAddr, + plaintext: VirtAddr, + plaintext_len: *mut c_int, + aad: VirtAddr, + 
aad_len: c_int, + ciphertext: VirtAddr, + ciphertext_len: c_int, + authtag: VirtAddr, + authtag_len: c_int, +) -> Result<(), ()> { + if ssl_ctx.is_null() { + prints!("ERR: SSL context can't be null for decryption\n"); + return Err(()); + } + if ciphertext_len > unsafe { *plaintext_len } { + prints!( + "ERR: ciphertext can't be bigger than plaintext buffer ({} > {})", + ciphertext_len, + { unsafe { *plaintext_len } } + ); + return Err(()); + } + + let ctx: *mut EVP_CIPHER_CTX = ssl_ctx as *mut EVP_CIPHER_CTX; + + let mut len: c_int = 0; + + // Provide Additional Authenticated Data (AAD) + let mut ret: i32 = unsafe { + EVP_DecryptUpdate( + ctx, + ptr::null_mut(), + ptr::addr_of_mut!(len), + aad.as_ptr(), + aad_len, + ) + }; + if ret != 1 { + prints!("ERR: Failed to provide the AAD for decryption ({})\n", ret); + return Err(()); + } + + // Provide ciphertext + ret = unsafe { + EVP_DecryptUpdate( + ctx, + plaintext.as_mut_ptr(), + ptr::addr_of_mut!(len), + ciphertext.as_ptr(), + ciphertext_len, + ) + }; + if ret != 1 { + prints!( + "ERR: Failed to provide the ciphertext for decryption ({})\n", + ret + ); + return Err(()); + } + + unsafe { + *plaintext_len = len; + } + + // Provide auth tag + let set_tag_value: c_int = match c_int::try_from(EVP_CTRL_GCM_SET_TAG) { + Ok(t) => t, + Err(_) => { + prints!("ERR: GCM_SET_TAG too big, value {}\n", EVP_CTRL_GCM_SET_TAG); + return Err(()); + } + }; + ret = unsafe { + EVP_CIPHER_CTX_ctrl( + ctx, + set_tag_value, + authtag_len, + authtag.as_mut_ptr() as *mut c_void, + ) + }; + if ret != 1 { + prints!( + "ERR: Failed to provide the auth tag for decryption ({})\n", + ret + ); + return Err(()); + } + + // Finalize decryption + let count: isize = match isize::try_from(unsafe { *plaintext_len }) { + Ok(c) => c, + Err(_) => { + prints!("ERR: Decrypted data too big, {} bytes\n", { + unsafe { *plaintext_len } + }); + return Err(()); + } + }; + let plaintext_offset_ptr: *mut c_uchar = unsafe { plaintext.as_mut_ptr::().offset(count) }; + ret = unsafe { EVP_DecryptFinal_ex(ctx, plaintext_offset_ptr, ptr::addr_of_mut!(len)) }; + if ret != 1 { + prints!("ERR: Failed to finalize decryption ({})\n", ret); + return Err(()); + } + + Ok(()) +} diff --git a/src/lib.rs b/src/lib.rs index 3a4b3ed..94c5a0a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,16 +16,22 @@ // overwrite _start(). #![cfg_attr(not(test), no_main)] +/// Bindgen generated FFI bindings and test cases +pub mod bindings; /// Initialize BIOS for the guest pub mod bios; /// Prepare and start SMP pub mod cpu; +/// Crypto API +pub mod crypto; /// Global constants pub mod globals; /// Prepare page table, handle memory (de)allocations pub mod mem; /// Implementation of SVSM protocols and calls pub mod protocols; +/// PSP firmware messages +pub mod psp; /// Handle requests from the SVSM guest pub mod svsm_request; /// Auxiliary functions and macros @@ -33,6 +39,7 @@ pub mod util; /// Handle the list of VMSA pages pub mod vmsa_list; /// Wrappers for external dependencies +#[cfg(not(test))] pub mod wrapper; extern crate alloc; @@ -42,6 +49,7 @@ use crate::cpu::rmpadjust; use crate::cpu::*; use crate::globals::*; use crate::mem::*; +use crate::psp::request::snp_guest_request_init; use crate::svsm_request::svsm_request_loop; use crate::util::*; use crate::vmsa::*; @@ -145,6 +153,9 @@ pub extern "C" fn svsm_main() -> ! 
{ // Initialize and start APs smp_init(); + // Initialize resources for SNP_GUEST_REQUEST messages + snp_guest_request_init(); + // Load BIOS start_bios(); diff --git a/src/mem/mod.rs b/src/mem/mod.rs index 1f386c2..b819bd8 100644 --- a/src/mem/mod.rs +++ b/src/mem/mod.rs @@ -18,6 +18,8 @@ pub mod ghcb; pub mod map_guard; /// Page Table and its related operations pub mod pgtable; +/// SNP Secrets +pub mod snpsecrets; pub use crate::mem::alloc::{ mem_allocate, mem_allocate_frame, mem_allocate_frames, mem_callocate, mem_create_stack, @@ -32,6 +34,8 @@ pub use crate::mem::pgtable::{ pub use crate::mem::map_guard::MapGuard; +pub use crate::mem::snpsecrets::{SnpSecrets, VMPCK_SIZE}; + pub use crate::mem::ghcb::ghcb_init; pub use crate::mem::fwcfg::{fwcfg_get_bios_area, fwcfg_init}; diff --git a/src/mem/snpsecrets.rs b/src/mem/snpsecrets.rs new file mode 100644 index 0000000..c91b45e --- /dev/null +++ b/src/mem/snpsecrets.rs @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * + * Authors: Carlos Bilbao and + * Tom Lendacky + */ + +use crate::{funcs, get_svsm_secrets_page, prints}; + +/// 32 +pub const VMPCK_SIZE: usize = 32; + +#[derive(Clone, Copy, Debug)] +#[repr(C, packed)] +pub struct SnpSecrets { + version: u32, + flags: u32, + fms: u32, + reserved1: [u8; 4], + + gosvw: [u8; 16], + + vmpck0: [u8; VMPCK_SIZE], + vmpck1: [u8; VMPCK_SIZE], + vmpck2: [u8; VMPCK_SIZE], + vmpck3: [u8; VMPCK_SIZE], + + os_reserved: [u8; 96], + + reserved2: [u8; 64], + + // SVSM fields start at offset 0x140 into the secrets page + svsm_base: u64, + svsm_size: u64, + svsm_caa: u64, + svsm_max_version: u32, + svsm_guest_vmpl: u8, + reserved3: [u8; 3], +} + +#[allow(dead_code)] +impl SnpSecrets { + pub fn clear_vmpck0(&mut self) { + self.vmpck0.iter_mut().for_each(|e| *e = 0); + } + + pub fn is_vmpck0_clear(self) -> bool { + self.vmpck0.into_iter().all(|e: u8| e == 0) + } + + funcs!(svsm_base, u64); + funcs!(svsm_size, u64); + funcs!(svsm_caa, u64); + funcs!(svsm_max_version, u32); + funcs!(svsm_guest_vmpl, u8); + funcs!(vmpck0, [u8; VMPCK_SIZE]); +} + +pub fn disable_vmpck0() { + let svsm_secrets_ptr: *mut SnpSecrets = get_svsm_secrets_page().as_mut_ptr(); + prints!("WARNING: VMPCK0 disabled!\n"); + unsafe { (*svsm_secrets_ptr).clear_vmpck0() } +} + +pub fn is_vmpck0_clear() -> bool { + let svsm_secrets_ptr: *mut SnpSecrets = get_svsm_secrets_page().as_mut_ptr(); + + unsafe { (*svsm_secrets_ptr).is_vmpck0_clear() } +} diff --git a/src/psp/guest_request_cmd.rs b/src/psp/guest_request_cmd.rs new file mode 100644 index 0000000..f0371d3 --- /dev/null +++ b/src/psp/guest_request_cmd.rs @@ -0,0 +1,591 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: Claudio Carvalho + */ + +use crate::cpu::vc::vc_snp_guest_request; +use crate::crypto::ssl::{ + aes_256_gcm_decrypt, aes_256_gcm_encrypt, init_aes_256_gcm_ctx, new_ssl_ctx, +}; +use crate::mem::snpsecrets::{disable_vmpck0, is_vmpck0_clear}; +use crate::mem::{mem_allocate, pgtable_make_pages_shared, SnpSecrets, VMPCK_SIZE}; +use crate::util::util::memset; +use crate::{ + funcs, get_svsm_secrets_page, getter_func, prints, ALIGN, ALIGNED, BIT, PAGE_COUNT, PAGE_SHIFT, + PAGE_SIZE, +}; + +use alloc::boxed::Box; +use core::cmp::min; +use core::ffi::c_int; +use core::ptr; +use core::ptr::copy_nonoverlapping; +use core::sync::atomic::{AtomicU64, Ordering}; +use memoffset::offset_of; +use x86_64::addr::VirtAddr; + +/// +/// AEAD Algorithm +/// + +/// 1 +const SNP_AEAD_AES_256_GCM: u8 = 1; 
+ +/// +/// Hypervisor error codes +/// + +/// BIT!(32) +pub const SNP_GUEST_REQ_INVALID_LEN: u64 = BIT!(32); +/// BIT!(33) +pub const SNP_GUEST_REQ_ERR_BUSY: u64 = BIT!(33); + +/// +/// SnpGuestRequestMsg +/// + +/// 0 +pub const SNP_MSG_TYPE_INVALID: u8 = 0; +/// 5 +pub const SNP_MSG_REPORT_REQ: u8 = 5; +/// 6 +pub const SNP_MSG_REPORT_RSP: u8 = 6; + +/// 1 +const HDR_VERSION: u8 = 1; +/// 1 +const MSG_VERSION: u8 = 1; +/// In the SEV-SNP ABI spec, the authentication tag should be at most +/// 128 bits. +/// 16 +const AUTHTAG_SIZE: c_int = 16; +/// In the SEV-SNP ABI spec, the IV should be at most 96 bits; but +/// the bits not used must be zeroed. +/// 12 +const IV_SIZE: c_int = 12; +/// 4000 +const MSG_PAYLOAD_SIZE: usize = 4000; + +/// 0x4000 +pub const SNP_GUEST_REQ_MAX_DATA_SIZE: usize = 0x4000; + +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +struct SnpGuestRequestMsgHdr { + authtag: [u8; 32usize], + msg_seqno: u64, + rsvd1: [u8; 8usize], + algo: u8, + hdr_version: u8, + hdr_sz: u16, + msg_type: u8, + msg_version: u8, + msg_sz: u16, + rsvd2: u32, + msg_vmpck: u8, + rsvd3: [u8; 35usize], +} + +impl SnpGuestRequestMsgHdr { + pub fn is_valid(&self, msg_type: u8, msg_seqno: u64) -> bool { + const MSG_HDR_SIZE: usize = core::mem::size_of::(); + + let header_size: u16 = match u16::try_from(MSG_HDR_SIZE) { + Ok(v) => v, + Err(_) => { + prints!("ERR: header size too big, {} bytes\n", MSG_HDR_SIZE); + return false; + } + }; + + // header version + if self.hdr_version != HDR_VERSION { + prints!( + "ERR: response header version {} should be {}\n", + { self.hdr_version }, + HDR_VERSION + ); + return false; + } + + // header size + if self.hdr_sz != header_size { + prints!( + "ERR: response header size {} should be {}\n", + { self.hdr_sz }, + header_size + ); + return false; + } + + // algo + if self.algo != SNP_AEAD_AES_256_GCM { + prints!( + "ERR: response algo {}, but should be {}\n", + { self.algo }, + SNP_AEAD_AES_256_GCM + ); + return false; + } + + // message type + if self.msg_type != msg_type { + prints!( + "ERR: response message type {}, but should be {}\n", + { self.msg_type }, + { msg_type } + ); + return false; + } + + // message vmpck + if self.msg_vmpck != 0 { + prints!("ERR: response message vmpck {}, but should be 0\n", { + self.msg_vmpck + }); + return false; + } + + // message sequence number + if self.msg_seqno != msg_seqno { + prints!( + "ERR: response message seqno {}, but should be {}\n", + { self.msg_seqno }, + msg_seqno + ); + return false; + } + + true + } +} + +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +struct SnpGuestRequestMsg { + hdr: SnpGuestRequestMsgHdr, + payload: [u8; MSG_PAYLOAD_SIZE], +} + +static SEQ_NUM: AtomicU64 = AtomicU64::new(0); + +fn seqno_last_used() -> u64 { + SEQ_NUM.load(Ordering::Relaxed) +} + +fn seqno_add_two() { + SEQ_NUM.fetch_add(2, Ordering::Relaxed); +} + +pub struct SnpGuestRequestCmd { + // SNP_GUEST_REQUEST requires two unique pages: one for + // the request and another for the response message. Both + // of them are assigned to the hypervisor (shared). + req_shared_page: VirtAddr, + resp_shared_page: VirtAddr, + + // Message encryption and decryption are performed in a + // private page to avoid data leaking. + staging_priv_page: VirtAddr, + + // SSL context is saved to simplify the clean-up logic in + // the error path. We free it after use. + ssl_ctx: *mut VirtAddr, + + // SNP Extended Guest Request. 
Its pages are also shared + // with the hypervisor + data_gva: VirtAddr, + data_npages: usize, + + is_initialized: bool, +} + +impl SnpGuestRequestCmd { + getter_func!(req_shared_page, VirtAddr); + getter_func!(resp_shared_page, VirtAddr); + getter_func!(data_gva, VirtAddr); + funcs!(data_npages, usize); + + pub const fn new() -> Self { + SnpGuestRequestCmd { + req_shared_page: VirtAddr::zero(), + resp_shared_page: VirtAddr::zero(), + + data_gva: VirtAddr::zero(), + data_npages: 0, + + staging_priv_page: VirtAddr::zero(), + ssl_ctx: VirtAddr::zero().as_mut_ptr(), + + is_initialized: false, + } + } + + pub fn init(&mut self) -> Result<(), ()> { + if !self.is_initialized { + self.req_shared_page = mem_allocate(PAGE_SIZE as usize)?; + self.resp_shared_page = mem_allocate(PAGE_SIZE as usize)?; + self.staging_priv_page = mem_allocate(PAGE_SIZE as usize)?; + + self.data_gva = mem_allocate(SNP_GUEST_REQ_MAX_DATA_SIZE)?; + if !ALIGNED!(self.data_gva.as_u64(), PAGE_SIZE) { + prints!("ERR: data_gva is not page aligned\n"); + return Err(()); + } + self.data_npages = PAGE_COUNT!(SNP_GUEST_REQ_MAX_DATA_SIZE as u64) as usize; + + // The SNP ABI spec says the request, response and data pages have + // to be shared with the hypervisor + pgtable_make_pages_shared(self.req_shared_page, PAGE_SIZE); + pgtable_make_pages_shared(self.resp_shared_page, PAGE_SIZE); + pgtable_make_pages_shared(self.data_gva, SNP_GUEST_REQ_MAX_DATA_SIZE as u64); + + self.is_initialized = true; + } + + Ok(()) + } + + fn set_ssl_ctx(&mut self, is_encryption: bool, msg_seqno: u64) -> Result<(), ()> { + self.ssl_ctx = new_ssl_ctx(self.ssl_ctx); + if self.ssl_ctx.is_null() { + prints!("ERR: Failed to create a new SSL context for encryption\n"); + return Err(()); + } + + let svsm_secrets_ptr: *mut SnpSecrets = get_svsm_secrets_page().as_mut_ptr(); + let vmpck0_va = VirtAddr::from_ptr(unsafe { *svsm_secrets_ptr }.vmpck0().as_ptr()); + let vmpck0_len: c_int = match c_int::try_from(VMPCK_SIZE) { + Ok(c) => c, + Err(_) => { + prints!("ERR: VMPCK too big for c_int\n"); + return Err(()); + } + }; + let mut msg_seqno_array: [u8; IV_SIZE as usize] = [0u8; IV_SIZE as usize]; + const U64_SIZE: usize = core::mem::size_of::(); + msg_seqno_array[..U64_SIZE].copy_from_slice(&u64::to_ne_bytes(msg_seqno)); + let msg_seqno_array_va = VirtAddr::from_ptr(msg_seqno_array.as_ptr()); + + init_aes_256_gcm_ctx( + self.ssl_ctx, + is_encryption, + vmpck0_va, + vmpck0_len, + msg_seqno_array_va, + IV_SIZE, + )?; + + Ok(()) + } + + /// Encrypt the plaintext using AES-256-GCM as described in the SNP ABI spec, where: + /// key = vmpck[0] + /// IV = sequence number + /// AAD = last 16 bytes of the SnpGuestRequestMsgHdr + fn encrypt_request( + &mut self, + msg_type: u8, + plaintext: VirtAddr, + plaintext_len: u16, + ) -> Result<(), ()> { + // Check VMPCK0 is valid + if is_vmpck0_clear() { + prints!("ERR: vmpck0 invalid\n"); + return Err(()); + } + + // Clear the staging private page before using it for encrypting the request + memset( + self.staging_priv_page.as_mut_ptr::(), + 0u8, + PAGE_SIZE as usize, + ); + + const MSG_HDR_SIZE: usize = core::mem::size_of::(); + + // Construct the request message header + let req: *mut SnpGuestRequestMsg = self.staging_priv_page.as_mut_ptr(); + let msg_seqno: u64 = match seqno_last_used().checked_add(1) { + Some(v) => v, + None => { + prints!("ERR: Request sequence number overflow\n"); + return Err(()); + } + }; + unsafe { + (*req).hdr.hdr_sz = match u16::try_from(MSG_HDR_SIZE) { + Ok(v) => v, + Err(_) => { + prints!("ERR: header 
size={} too big for u16\n", MSG_HDR_SIZE); + return Err(()); + } + }; + (*req).hdr.algo = SNP_AEAD_AES_256_GCM; + (*req).hdr.hdr_version = HDR_VERSION; + (*req).hdr.msg_sz = plaintext_len; + (*req).hdr.msg_type = msg_type; + (*req).hdr.msg_version = MSG_VERSION; + (*req).hdr.msg_vmpck = 0; + (*req).hdr.msg_seqno = msg_seqno; + } + + self.set_ssl_ctx(true, msg_seqno)?; + + let algo_offset: c_int = match c_int::try_from(offset_of!(SnpGuestRequestMsgHdr, algo)) { + Ok(o) => o, + Err(_) => { + prints!("ERR: algo offset is too big for c_int\n"); + return Err(()); + } + }; + let msg_hdr_size_cint: c_int = match c_int::try_from(MSG_HDR_SIZE) { + Ok(c) => c, + Err(_) => { + prints!("ERR: msg header size too big for c_int\n"); + return Err(()); + } + }; + let aad_len: c_int = msg_hdr_size_cint - algo_offset; + let aad: VirtAddr = unsafe { VirtAddr::from_ptr(&(*req).hdr.algo) }; + let ciphertext: VirtAddr = unsafe { VirtAddr::from_ptr((*req).payload.as_ptr()) }; + let mut ciphertext_len: c_int = match c_int::try_from(MSG_PAYLOAD_SIZE) { + Ok(c) => c, + Err(_) => { + prints!("ERR: msg payload size too big for c_int\n"); + return Err(()); + } + }; + let authtag: VirtAddr = unsafe { VirtAddr::from_ptr((*req).hdr.authtag.as_mut_ptr()) }; + + aes_256_gcm_encrypt( + self.ssl_ctx, + plaintext, + c_int::from(plaintext_len), + aad, + aad_len, + ciphertext, + ptr::addr_of_mut!(ciphertext_len), + authtag, + AUTHTAG_SIZE, + )?; + + prints!( + "INFO: SNP_GUEST_REQUEST msg_type {} encrypted ({} bytes)\n", + msg_type, + ciphertext_len + ); + //prints!("DEBUG: req_msg {:p} {:x?}\n", { &(*req) }, { *req }); + + memset( + self.req_shared_page.as_mut_ptr::<u8>(), + 0u8, + PAGE_SIZE as usize, + ); + unsafe { + copy_nonoverlapping( + self.staging_priv_page.as_ptr::<u8>(), + self.req_shared_page.as_mut_ptr::<u8>(), + PAGE_SIZE as usize, + ); + } + + Ok(()) + } + + /// Send the encrypted SNP_GUEST_REQUEST message to the PSP. + fn send(&mut self, extended: bool, mut psp_rc: &mut u64) -> Result<(), ()> { + memset( + self.resp_shared_page.as_mut_ptr::<u8>(), + 0u8, + PAGE_SIZE as usize, + ); + + // Send the encrypted request + vc_snp_guest_request(extended, &mut psp_rc, self)?; + + match *psp_rc { + // Success + 0 => {} + // certs_buf too small, the hypervisor did not forward the request. + // Save the number of pages required for the certificate chain + // and send the request again as a non-extended request + // to prevent IV reuse. + SNP_GUEST_REQ_INVALID_LEN => { + if extended { + let npages_required: usize = self.data_npages(); + vc_snp_guest_request(false, &mut psp_rc, self)?; + self.set_data_npages(npages_required); + if *psp_rc != 0 { + return Err(()); + } + *psp_rc = SNP_GUEST_REQ_INVALID_LEN; + } + } + // Hypervisor busy, the request was not forwarded to the PSP. Send + // the request again to prevent IV reuse. + SNP_GUEST_REQ_ERR_BUSY => { + vc_snp_guest_request(extended, &mut psp_rc, self)?; + if *psp_rc != 0 { + return Err(()); + } + } + // Failed. See the status codes in the SEV SNP ABI spec or in the + // linux kernel include/uapi/linux/psp-sev.h + _ => { + prints!("ERR: SNP_GUEST_REQUEST failed, rc={}\n", { *psp_rc }); + return Err(()); + } + } + + // The PSP firmware increases the sequence number only when + // it receives a request successfully. Hence, we sync our + // sequence number (add two) only when we receive a response + // successfully.
seqno_add_two(); + + Ok(()) + } + + fn decrypt_response(&mut self, msg_type: u8) -> Result<Box<[u8]>, ()> { + // Check VMPCK0 is valid + if is_vmpck0_clear() { + prints!("ERR: vmpck0 invalid\n"); + return Err(()); + } + + // Decrypt the response in a private page to avoid any interference from + // the hypervisor + memset( + self.staging_priv_page.as_mut_ptr::<u8>(), + 0u8, + PAGE_SIZE as usize, + ); + unsafe { + copy_nonoverlapping( + self.resp_shared_page.as_ptr::<u8>(), + self.staging_priv_page.as_mut_ptr::<u8>(), + PAGE_SIZE as usize, + ); + } + + let resp: *const SnpGuestRequestMsg = self.staging_priv_page.as_ptr(); + + unsafe { + // Check if the response header is valid + if !(*resp).hdr.is_valid(msg_type, seqno_last_used()) { + return Err(()); + } + } + + self.set_ssl_ctx(false, seqno_last_used())?; + + let mut plaintext: [u8; MSG_PAYLOAD_SIZE] = [0u8; MSG_PAYLOAD_SIZE]; + let mut plaintext_len: c_int = match c_int::try_from(MSG_PAYLOAD_SIZE) { + Ok(c) => c, + Err(_) => { + prints!("ERR: msg payload size too big for c_int\n"); + return Err(()); + } + }; + let algo_offset: c_int = match c_int::try_from(offset_of!(SnpGuestRequestMsgHdr, algo)) { + Ok(o) => o, + Err(_) => { + prints!("ERR: algo offset is too big for c_int\n"); + return Err(()); + } + }; + const MSG_HDR_SIZE: usize = core::mem::size_of::<SnpGuestRequestMsgHdr>(); + let __msg_hdr_size: c_int = match c_int::try_from(MSG_HDR_SIZE) { + Ok(c) => c, + Err(_) => { + prints!("ERR: msg header size is too big for c_int\n"); + return Err(()); + } + }; + let aad_len: c_int = __msg_hdr_size - algo_offset; + let aad: VirtAddr = unsafe { VirtAddr::from_ptr(&(*resp).hdr.algo) }; + let ciphertext: VirtAddr = unsafe { VirtAddr::from_ptr((*resp).payload.as_ptr()) }; + let ciphertext_len: c_int = unsafe { c_int::from((*resp).hdr.msg_sz) }; + let authtag: VirtAddr = unsafe { VirtAddr::from_ptr((*resp).hdr.authtag.as_ptr()) }; + + aes_256_gcm_decrypt( + self.ssl_ctx, + VirtAddr::from_ptr(plaintext.as_mut_ptr()), + ptr::addr_of_mut!(plaintext_len), + aad, + aad_len, + ciphertext, + ciphertext_len, + authtag, + AUTHTAG_SIZE, + )?; + + prints!( + "INFO: SNP_GUEST_REQUEST msg_type {} decrypted ({} bytes)\n", + msg_type, + plaintext_len + ); + //prints!("DEBUG: resp_msg {:x?}\n", { &buf[..500] }); + + Ok(plaintext.into()) + } + + /// Send a SNP_GUEST_REQUEST message to the platform security processor (PSP) following + /// the GHCB protocol. Messages are encrypted and decrypted using AES-256-GCM.
+ pub fn send_request( + &mut self, + msg_type: u8, + extended: bool, + payload: VirtAddr, + payload_size: u16, + psp_rc: &mut u64, + ) -> Result<Box<[u8]>, ()> { + if !self.is_initialized { + return Err(()); + } + self.encrypt_request(msg_type, payload, payload_size)?; + + if self.send(extended, psp_rc).is_err() { + disable_vmpck0(); + return Err(()); + } + + let result: Result<Box<[u8]>, ()> = self.decrypt_response(msg_type + 1); + if result.is_err() { + disable_vmpck0(); + } + + result + } + + /// Copy to buf the certificates obtained in the last extended report request + pub fn copy_from_data(&self, buf: VirtAddr, buf_size: usize) { + unsafe { + ptr::copy_nonoverlapping( + self.data_gva.as_mut_ptr::<u8>(), + buf.as_mut_ptr::<u8>(), + min(buf_size, SNP_GUEST_REQ_MAX_DATA_SIZE as usize), + ); + } + } + + /// Check if the first sz bytes of the data buffer are all zero + pub fn is_data_bytes_empty(&self, sz: usize) -> bool { + let m: usize = min(sz, SNP_GUEST_REQ_MAX_DATA_SIZE); + let buf: *const [u8; SNP_GUEST_REQ_MAX_DATA_SIZE] = + self.data_gva.as_ptr() as *const [u8; SNP_GUEST_REQ_MAX_DATA_SIZE]; + unsafe { (*buf)[..m].iter().all(|e| *e == 0) } + } + + /// Clear sz bytes from the data buffer + pub fn clear_data_bytes(&self, sz: usize) { + memset( + self.data_gva.as_mut_ptr::<u8>(), + 0u8, + min(sz, SNP_GUEST_REQ_MAX_DATA_SIZE), + ); + } +} diff --git a/src/psp/mod.rs b/src/psp/mod.rs new file mode 100644 index 0000000..6da5854 --- /dev/null +++ b/src/psp/mod.rs @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +/// SNP Guest Request driver +pub mod guest_request_cmd; +/// Attestation report structures +pub mod msg_report; +/// SNP Guest Request services +pub mod request; diff --git a/src/psp/msg_report.rs b/src/psp/msg_report.rs new file mode 100644 index 0000000..c7d8f46 --- /dev/null +++ b/src/psp/msg_report.rs @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +use crate::{getter_func, prints}; + +use alloc::boxed::Box; + +/// 64 +pub const USER_DATA_SIZE: usize = 64; + +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +pub struct SnpReportRequest { + user_data: [u8; USER_DATA_SIZE], + vmpl: u32, + rsvd: [u8; 28usize], +} + +impl SnpReportRequest { + pub fn new() -> Self { + Self { + user_data: [0u8; USER_DATA_SIZE], + vmpl: 0u32, + rsvd: [0u8; 28], + } + } + + pub fn set_user_data(&mut self, data: &[u8; USER_DATA_SIZE]) { + self.user_data.copy_from_slice(data); + } +} + +#[repr(C)] +#[repr(align(2048))] +#[derive(Debug, Copy, Clone)] +pub struct SnpReportResponse { + status: u32, + report_size: u32, + _reserved: [u8; 24], + report: AttestationReport, +} + +impl SnpReportResponse { + getter_func!(status, u32); + getter_func!(report_size, u32); + getter_func!(report, AttestationReport); + + pub fn is_valid(&self) -> bool { + // Check status + if self.status != 0 { + prints!("ERR: Bad report status={}\n", { self.status }); + return false; + } + + const REPORT_SIZE: usize = core::mem::size_of::<AttestationReport>(); + + // Check report size + if self.report_size != REPORT_SIZE as u32 { + prints!( + "ERR: Report size {:#x}, but should be {:#x} bytes\n", + { self.report_size }, + REPORT_SIZE + ); + return false; + } + + true + } +} + +impl TryFrom<Box<[u8]>> for SnpReportResponse { + type Error = (); + + fn try_from(payload: Box<[u8]>) -> Result<Self, Self::Error> { + let resp: SnpReportResponse = { + let (head, body, _tail) = unsafe { payload.align_to::<SnpReportResponse>() }; + if !head.is_empty() { + prints!("ERR: Report response not
aligned\n"); + return Err(()); + } + body[0] + }; + + Ok(resp) + } +} + +// Converted tcb_version from enum to +// struct to make alignment simple. +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +struct TcbVersion { + raw: u64, +} + +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +struct Signature { + r: [u8; 72usize], + s: [u8; 72usize], + reserved: [u8; 368usize], +} + +#[repr(C, packed)] +#[derive(Debug, Copy, Clone)] +pub struct AttestationReport { + version: u32, + guest_svn: u32, + policy: u64, + family_id: [u8; 16usize], + image_id: [u8; 16usize], + vmpl: u32, + signature_algo: u32, + platform_version: TcbVersion, + platform_info: u64, + flags: u32, + reserved0: u32, + report_data: [u8; 64usize], + measurement: [u8; 48usize], + host_data: [u8; 32usize], + id_key_digest: [u8; 48usize], + author_key_digest: [u8; 48usize], + report_id: [u8; 32usize], + report_id_ma: [u8; 32usize], + reported_tcb: TcbVersion, + reserved1: [u8; 24usize], + chip_id: [u8; 64usize], + reserved2: [u8; 192usize], + signature: Signature, +} diff --git a/src/psp/request.rs b/src/psp/request.rs new file mode 100644 index 0000000..0c68762 --- /dev/null +++ b/src/psp/request.rs @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 IBM + * + * Authors: + * Claudio Carvalho + */ + +use crate::protocols::error_codes::*; +use crate::psp::guest_request_cmd::{ + SnpGuestRequestCmd, SNP_GUEST_REQ_INVALID_LEN, SNP_GUEST_REQ_MAX_DATA_SIZE, SNP_MSG_REPORT_REQ, +}; +use crate::psp::msg_report::{SnpReportRequest, SnpReportResponse, USER_DATA_SIZE}; +use crate::util::locking::{LockGuard, SpinLock}; +use crate::{prints, ALIGN, ALIGNED, PAGE_COUNT, PAGE_SHIFT, PAGE_SIZE}; + +use alloc::boxed::Box; +use x86_64::VirtAddr; + +/// SNP_GUEST_REQUEST Object +static GUEST_REQUEST_CMD: SpinLock<SnpGuestRequestCmd> = SpinLock::new(SnpGuestRequestCmd::new()); + +pub struct CertsBuf { + addr: VirtAddr, + size: usize, +} + +impl CertsBuf { + pub fn new(va: VirtAddr, sz: usize) -> Self { + CertsBuf { addr: va, size: sz } + } +} + +pub fn snp_guest_request_init() { + if GUEST_REQUEST_CMD.lock().init().is_err() { + // Since the SNP_GUEST_REQUEST resources failed to initialize, + // all subsequent SNP Guest Requests will fail + prints!("ERR: Failed to initialize SNP_GUEST_REQUEST\n"); + } +} + +/// Request a vmpl0 attestation report from the platform security processor (PSP). +/// +/// @user_data: data that will be included in the attestation report and signed +/// @psp_rc : PSP return code. +/// @certs_buf: Optional. Buffer to store the certificate chain needed to verify +/// the attestation report. Make sure to load the certificates +/// from the host using the sev-guest tools. +/// +/// It returns the SnpReportResponse on success, otherwise an error code. +/// +/// Further information can be found in the Secure Nested Paging Firmware ABI +/// Specification, Chapter 7, subsection Attestation +pub fn get_report( + user_data: &[u8; USER_DATA_SIZE], + psp_rc: &mut u64, + mut certs_buf: Option<&mut CertsBuf>, +) -> Result<SnpReportResponse, u64> { + const REPORT_REQUEST_SIZE: usize = core::mem::size_of::<SnpReportRequest>(); + // The size of the SnpReportRequest structure needs to fit in the + // SnpGuestRequestMsg.hdr.msg_sz field, which is a u16.
+ let req_size: u16 = match u16::try_from(REPORT_REQUEST_SIZE) { + Ok(sz) => sz, + Err(_) => { + prints!( + "ERR: BUG: Report request size={} is too big for u16\n", + REPORT_REQUEST_SIZE + ); + return Err(SVSM_ERR_PROTOCOL_BASE); + } + }; + + let mut cmd: LockGuard<SnpGuestRequestCmd> = GUEST_REQUEST_CMD.lock(); + let extended: bool = certs_buf.is_some(); + + if extended { + // Take a mutable reference, otherwise we will not be able to use certs_buf later again + let buf: &mut CertsBuf = certs_buf.as_mut().unwrap(); + + if buf.addr.is_null() || buf.size == 0 { + return Err(SVSM_ERR_INVALID_PARAMETER); + } + if buf.size > SNP_GUEST_REQ_MAX_DATA_SIZE as usize { + prints!("ERR: certs_buf_size={:#x} too big\n", { buf.size }); + return Err(SVSM_ERR_INVALID_PARAMETER); + } + if !ALIGNED!({ buf.addr.as_u64() }, PAGE_SIZE) { + prints!("ERR: certs_buf addr={:#x} not page aligned\n", { buf.addr }); + return Err(SVSM_ERR_INVALID_PARAMETER); + } + let npages: usize = PAGE_COUNT!({ buf.size as u64 }) as usize; + cmd.set_data_npages(npages); + cmd.clear_data_bytes(buf.size); + } + + // Instantiate a vmpl0 report request + let mut req: SnpReportRequest = SnpReportRequest::new(); + req.set_user_data(user_data); + + let result: Result<Box<[u8]>, ()> = cmd.send_request( + SNP_MSG_REPORT_REQ, + extended, + VirtAddr::from_ptr(&req), + req_size, + psp_rc, + ); + + if result.is_err() { + if extended && *psp_rc == SNP_GUEST_REQ_INVALID_LEN { + let buf: &mut CertsBuf = certs_buf.as_mut().unwrap(); + prints!("ERR: Certificate buffer is too small, {} bytes\n", { + buf.size + }); + buf.size = (cmd.data_npages() << PAGE_SHIFT) as usize; + return Err(SVSM_ERR_INVALID_PARAMETER); + } + + return Err(SVSM_ERR_PROTOCOL_BASE); + } + + let message: Box<[u8]> = result.unwrap(); + let resp: SnpReportResponse = match SnpReportResponse::try_from(message) { + Ok(r) => r, + Err(()) => return Err(SVSM_ERR_PROTOCOL_BASE), + }; + if !resp.is_valid() { + return Err(SVSM_ERR_PROTOCOL_BASE); + } + + // The sev-guest tools on the host are used to load the certificates needed to + // verify the attestation report. If they were not loaded (yet), print a warning.
+ if extended { + let buf: &mut CertsBuf = certs_buf.as_mut().unwrap(); + if cmd.is_data_bytes_empty(buf.size) { + prints!("WARNING: Attestation report certificates not found.\n"); + } else { + cmd.copy_from_data(buf.addr, buf.size); + } + } + + Ok(resp) +} diff --git a/src/start/start.S b/src/start/start.S index 43231ed..dc58a4e 100644 --- a/src/start/start.S +++ b/src/start/start.S @@ -219,10 +219,7 @@ terminate_hlt: GLOBAL(bsp_guard_page) .fill 512, 8, 0 bsp_stack_start: - .fill 512, 8, 0 - .fill 512, 8, 0 - .fill 512, 8, 0 - .fill 512, 8, 0 + .fill 8192, 8, 0 bsp_stack_end: /* diff --git a/src/wrapper.rs b/src/wrapper.rs index d3248c7..ae56e51 100644 --- a/src/wrapper.rs +++ b/src/wrapper.rs @@ -7,76 +7,59 @@ * Vikram Narayanan */ -#![allow(non_camel_case_types)] +use crate::mem::{mem_allocate, mem_callocate, mem_free, mem_reallocate}; +use crate::{prints, vc_terminate_svsm_general}; -#[cfg(not(test))] -mod wrappers { - use crate::mem::{mem_allocate, mem_callocate, mem_free, mem_reallocate}; - use crate::prints; - - use core::{ptr, slice, str}; - use x86_64::VirtAddr; - - #[no_mangle] - pub extern "C" fn malloc(size: cty::c_ulong) -> *mut cty::c_void { - if let Ok(va) = mem_allocate(size as usize) { - return va.as_mut_ptr(); - }; - ptr::null_mut() - } +use core::ffi::{c_char, c_int, c_ulong, c_void}; +use core::{ptr, slice, str}; +use x86_64::VirtAddr; - #[no_mangle] - pub extern "C" fn calloc(items: cty::c_ulong, size: cty::c_ulong) -> *mut cty::c_void { - if let Some(num_bytes) = items.checked_mul(size as u64) { - if let Ok(va) = mem_callocate(num_bytes as usize) { - return va.as_mut_ptr(); - } - } - ptr::null_mut() - } +#[no_mangle] +pub extern "C" fn malloc(size: c_ulong) -> *mut c_void { + if let Ok(va) = mem_allocate(size as usize) { + return va.as_mut_ptr(); + }; + ptr::null_mut() +} - #[no_mangle] - pub extern "C" fn realloc(p: *mut cty::c_void, size: cty::c_ulong) -> *mut cty::c_void { - if let Ok(va) = mem_reallocate(VirtAddr::new(p as u64), size as usize) { +#[no_mangle] +pub extern "C" fn calloc(items: c_ulong, size: c_ulong) -> *mut c_void { + if let Some(num_bytes) = items.checked_mul(size as u64) { + if let Ok(va) = mem_callocate(num_bytes as usize) { return va.as_mut_ptr(); } - ptr::null_mut() } + ptr::null_mut() +} - #[no_mangle] - #[cfg(not(test))] - pub extern "C" fn free(p: *mut cty::c_void) { - if p.is_null() { - return; - } - mem_free(VirtAddr::new(p as u64)); +#[no_mangle] +pub extern "C" fn realloc(p: *mut c_void, size: c_ulong) -> *mut c_void { + if let Ok(va) = mem_reallocate(VirtAddr::new(p as u64), size as usize) { + return va.as_mut_ptr(); } + ptr::null_mut() +} - #[no_mangle] - pub extern "C" fn serial_out(s: *const cty::c_char, size: cty::c_int) { - let str_slice: &[u8] = unsafe { slice::from_raw_parts(s as *const u8, size as usize) }; - if let Ok(rust_str) = str::from_utf8(str_slice) { - prints!("{}", rust_str); - } else { - prints!("ERR: BUG: serial_out arg1 is not a valid utf8 string\n"); - } +#[no_mangle] +#[cfg(not(test))] +pub extern "C" fn free(p: *mut c_void) { + if p.is_null() { + return; } + mem_free(VirtAddr::new(p as u64)); } -#[cfg(test)] -#[allow(dead_code)] -mod test_wrappers { - - extern "C" { - fn malloc(size: cty::c_ulong) -> *mut cty::c_void; - fn calloc(items: cty::c_ulong, size: cty::c_ulong) -> *mut cty::c_void; - fn realloc(p: *mut cty::c_void, size: cty::c_ulong) -> *mut cty::c_void; - fn free(ptr: *mut cty::c_void); +#[no_mangle] +pub extern "C" fn serial_out(s: *const c_char, size: c_int) { + let str_slice: &[u8] = unsafe { 
slice::from_raw_parts(s as *const u8, size as usize) }; + if let Ok(rust_str) = str::from_utf8(str_slice) { + prints!("{}", rust_str); + } else { + prints!("ERR: BUG: serial_out arg1 is not a valid utf8 string\n"); } } #[no_mangle] pub extern "C" fn abort() -> ! { - use crate::vc_terminate_svsm_general; vc_terminate_svsm_general(); } diff --git a/svsm-target.json b/x86_64-unknown-none.json similarity index 100% rename from svsm-target.json rename to x86_64-unknown-none.json
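For reference, a minimal sketch of how the attestation service added by this patch might be invoked from elsewhere in the SVSM. The wrapper function and its error handling below are illustrative assumptions, not part of the patch; it only relies on the get_report() interface introduced in src/psp/request.rs.

```rust
use crate::psp::msg_report::{SnpReportResponse, USER_DATA_SIZE};
use crate::psp::request::get_report;

/// Hypothetical caller: request a plain (non-extended) VMPL0 attestation
/// report, mixing caller-supplied user data into the signed report.
fn request_attestation_report(
    user_data: &[u8; USER_DATA_SIZE],
) -> Result<SnpReportResponse, u64> {
    let mut psp_rc: u64 = 0;

    // Passing None for certs_buf sends a regular SNP_GUEST_REQUEST; passing
    // Some(&mut CertsBuf) would send the extended form, which also fetches
    // the certificate chain into a page-aligned, caller-owned buffer.
    let resp: SnpReportResponse = get_report(user_data, &mut psp_rc, None)?;

    // get_report() has already validated the response status and report size.
    Ok(resp)
}
```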