Skip to content

Commit

Permalink
Merge pull request coconut-svsm#461 from msft-jlange/page_validate
Browse files Browse the repository at this point in the history
platform: extend the way pages are identified for validation/invalidation
  • Loading branch information
joergroedel authored Oct 2, 2024
2 parents 15bc1d1 + c2a9101 commit 0242978
Show file tree
Hide file tree
Showing 14 changed files with 288 additions and 108 deletions.
18 changes: 15 additions & 3 deletions kernel/src/boot_stage2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,16 @@ global_asm!(
decl %ecx
jnz 1b
/* Insert a self-map entry */
movl $pgtable, %edi
movl %edi, %eax
orl $0x63, %eax
/* The value 0xF68 is equivalent to 8 * PGTABLE_LVL3_IDX_PTE_SELFMAP */
movl %eax, 0xF68(%edi)
movl $0x80000000, %eax
orl %edx, %eax
movl %eax, 0xF6C(%edi)
/* Signal APs */
movl $setup_flag, %edi
movl $1, (%edi)
Expand All @@ -99,11 +109,13 @@ global_asm!(
bts $5, %eax
movl %eax, %cr4
/* Enable long mode, EFER.LME. */
/* Enable long mode, EFER.LME. Also ensure NXE is set. */
movl $0xc0000080, %ecx
rdmsr
bts $8, %eax
jc 2f
movl %eax, %ebx
orl $0x900, %eax
cmp %eax, %ebx
jz 2f
wrmsr
2:
Expand Down
14 changes: 0 additions & 14 deletions kernel/src/cpu/efer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,7 @@
//
// Author: Joerg Roedel <[email protected]>

use super::features::cpu_has_nx;
use super::msr::{read_msr, write_msr, EFER};
use crate::platform::SvsmPlatform;
use bitflags::bitflags;

bitflags! {
Expand Down Expand Up @@ -34,15 +32,3 @@ pub fn write_efer(efer: EFERFlags) {
let val = efer.bits();
write_msr(EFER, val);
}

pub fn efer_init(platform: &dyn SvsmPlatform) {
let mut efer = read_efer();

// All processors that are capable of virtualization will support
// no-execute table entries, so there is no reason to support any processor
// that does not enumerate NX capability.
assert!(cpu_has_nx(platform), "CPU does not support NX");

efer.insert(EFERFlags::NXE);
write_efer(efer);
}
10 changes: 0 additions & 10 deletions kernel/src/cpu/features.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,8 @@

use crate::platform::SvsmPlatform;

const X86_FEATURE_NX: u32 = 20;
const X86_FEATURE_PGE: u32 = 13;

pub fn cpu_has_nx(platform: &dyn SvsmPlatform) -> bool {
let ret = platform.cpuid(0x80000001);

match ret {
None => false,
Some(c) => (c.edx >> X86_FEATURE_NX) & 1 == 1,
}
}

pub fn cpu_has_pge(platform: &dyn SvsmPlatform) -> bool {
let ret = platform.cpuid(0x00000001);

Expand Down
4 changes: 2 additions & 2 deletions kernel/src/igvm_params.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use crate::cpu::efer::EFERFlags;
use crate::error::SvsmError;
use crate::fw_meta::SevFWMetaData;
use crate::mm::{GuestPtr, PerCPUPageMappingGuard, PAGE_SIZE};
use crate::platform::{PageStateChangeOp, SVSM_PLATFORM};
use crate::platform::{PageStateChangeOp, PageValidateOp, SVSM_PLATFORM};
use crate::types::PageSize;
use crate::utils::MemoryRegion;
use alloc::vec::Vec;
Expand Down Expand Up @@ -173,7 +173,7 @@ impl IgvmParams<'_> {
}

let mem_map_va_region = MemoryRegion::<VirtAddr>::new(mem_map_va, mem_map_region.len());
platform.validate_page_range(mem_map_va_region)?;
platform.validate_virtual_page_range(mem_map_va_region, PageValidateOp::Validate)?;

// Calculate the maximum number of entries that can be inserted.
let max_entries = fw_info.memory_map_page_count as usize * PAGE_SIZE
Expand Down
30 changes: 12 additions & 18 deletions kernel/src/mm/address_space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@
use crate::address::{PhysAddr, VirtAddr};
use crate::utils::immut_after_init::ImmutAfterInitCell;

#[cfg(target_os = "none")]
use crate::mm::pagetable::PageTable;

#[derive(Debug, Copy, Clone)]
#[allow(dead_code)]
pub struct FixedAddressMappingRange {
Expand Down Expand Up @@ -38,16 +41,6 @@ impl FixedAddressMappingRange {
}
}
}

#[cfg(target_os = "none")]
fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
if (vaddr < self.virt_start) || (vaddr >= self.virt_end) {
None
} else {
let offset: usize = vaddr - self.virt_start;
Some(self.phys_start + offset)
}
}
}

#[derive(Debug, Copy, Clone)]
Expand All @@ -74,16 +67,12 @@ pub fn init_kernel_mapping_info(

#[cfg(target_os = "none")]
pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
if let Some(addr) = FIXED_MAPPING.kernel_mapping.virt_to_phys(vaddr) {
return addr;
}
if let Some(ref mapping) = FIXED_MAPPING.heap_mapping {
if let Some(addr) = mapping.virt_to_phys(vaddr) {
return addr;
match PageTable::virt_to_phys(vaddr) {
Some(paddr) => paddr,
None => {
panic!("Invalid virtual address {:#018x}", vaddr);
}
}

panic!("Invalid virtual address {:#018x}", vaddr);
}

#[cfg(target_os = "none")]
Expand Down Expand Up @@ -203,6 +192,11 @@ pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
/// Kernel stack for a task
pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;

/// Page table self-map level 3 index
pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;

pub const SVSM_PTE_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);

//
// User-space mapping constants
//
Expand Down
14 changes: 10 additions & 4 deletions kernel/src/mm/page_visibility.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use crate::mm::validate::{
valid_bitmap_clear_valid_4k, valid_bitmap_set_valid_4k, valid_bitmap_valid_addr,
};
use crate::mm::{virt_to_phys, PageBox};
use crate::platform::{PageStateChangeOp, SVSM_PLATFORM};
use crate::platform::{PageStateChangeOp, PageValidateOp, SVSM_PLATFORM};
use crate::protocols::errors::SvsmReqError;
use crate::types::{PageSize, PAGE_SIZE};
use crate::utils::MemoryRegion;
Expand All @@ -39,7 +39,10 @@ unsafe fn make_page_shared(vaddr: VirtAddr) -> Result<(), SvsmError> {
let platform = SVSM_PLATFORM.as_dyn_ref();

// Revoke page validation before changing page state.
platform.invalidate_page_range(MemoryRegion::new(vaddr, PAGE_SIZE))?;
platform.validate_virtual_page_range(
MemoryRegion::new(vaddr, PAGE_SIZE),
PageValidateOp::Invalidate,
)?;
let paddr = virt_to_phys(vaddr);
if valid_bitmap_valid_addr(paddr) {
valid_bitmap_clear_valid_4k(paddr);
Expand Down Expand Up @@ -83,8 +86,11 @@ unsafe fn make_page_private(vaddr: VirtAddr) -> Result<(), SvsmError> {
PageStateChangeOp::Private,
)?;

// Revoke page validation before changing page state.
platform.validate_page_range(MemoryRegion::new(vaddr, PAGE_SIZE))?;
// Validate the page now that it is private again.
platform.validate_virtual_page_range(
MemoryRegion::new(vaddr, PAGE_SIZE),
PageValidateOp::Validate,
)?;
if valid_bitmap_valid_addr(paddr) {
valid_bitmap_set_valid_4k(paddr);
}
Expand Down
116 changes: 106 additions & 10 deletions kernel/src/mm/pagetable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,10 @@ use crate::cpu::flush_tlb_global_sync;
use crate::cpu::idt::common::PageFaultError;
use crate::cpu::registers::RFlags;
use crate::error::SvsmError;
use crate::mm::PageBox;
use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
use crate::mm::{
phys_to_virt, virt_to_phys, PageBox, PGTABLE_LVL3_IDX_PTE_SELFMAP, PGTABLE_LVL3_IDX_SHARED,
SVSM_PTE_BASE,
};
use crate::platform::SvsmPlatform;
use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult};
Expand Down Expand Up @@ -53,7 +55,6 @@ pub fn paging_init_early(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()
init_encrypt_mask(platform)?;

let mut feature_mask = PTEntryFlags::all();
feature_mask.remove(PTEntryFlags::NX);
feature_mask.remove(PTEntryFlags::GLOBAL);
FEATURE_MASK.reinit(&feature_mask)
}
Expand Down Expand Up @@ -361,6 +362,17 @@ impl PTEntry {
let addr = PhysAddr::from(self.0.bits() & 0x000f_ffff_ffff_f000);
strip_confidentiality_bits(addr)
}

/// Read a page table entry from the specified virtual address.
///
/// # Safety
///
/// Reads from an arbitrary virtual address, making this essentially a
/// raw pointer read. The caller must be certain to calculate the correct
/// address.
pub unsafe fn read_pte(vaddr: VirtAddr) -> Self {
*vaddr.as_ptr::<Self>()
}
}

/// A pagetable page with multiple entries.
Expand Down Expand Up @@ -457,13 +469,33 @@ impl PageTable {
virt_to_phys(pgtable)
}

/// Allocate a new page table root.
///
/// The new root is created with its self-map entry already installed so
/// that translations via the self-map work immediately.
///
/// # Errors
/// Returns [`SvsmError`] if the page cannot be allocated.
pub fn allocate_new() -> Result<PageBox<Self>, SvsmError> {
    let mut table = PageBox::try_new(PageTable::default())?;
    let root_paddr = virt_to_phys(table.vaddr());

    // Install the self-map entry: it must be private, writable, and
    // non-executable.
    let selfmap_flags = PTEntryFlags::PRESENT
        | PTEntryFlags::WRITABLE
        | PTEntryFlags::ACCESSED
        | PTEntryFlags::DIRTY
        | PTEntryFlags::NX;
    table.root[PGTABLE_LVL3_IDX_PTE_SELFMAP].set(make_private_address(root_paddr), selfmap_flags);

    Ok(table)
}

/// Clone the shared part of the page table; excluding the private
/// parts.
///
/// The clone is built on a freshly allocated root (with its own
/// self-map entry) and only the shared top-level entry is copied over.
///
/// # Errors
/// Returns [`SvsmError`] if the page cannot be allocated.
pub fn clone_shared(&self) -> Result<PageBox<PageTable>, SvsmError> {
    let mut cloned = Self::allocate_new()?;
    cloned.root.entries[PGTABLE_LVL3_IDX_SHARED] = self.root.entries[PGTABLE_LVL3_IDX_SHARED];
    Ok(cloned)
}
Expand Down Expand Up @@ -561,6 +593,72 @@ impl PageTable {
Self::walk_addr_lvl3(&mut self.root, vaddr)
}

/// Calculate the virtual address of a PTE in the self-map, which maps a
/// specified virtual address.
///
/// # Parameters
/// - `vaddr`: The virtual address whose PTE should be located.
///
/// # Returns
/// The virtual address of the PTE.
fn get_pte_address(vaddr: VirtAddr) -> VirtAddr {
    // Keep the 4 KiB-aligned VA bits (47:12) and shift right by 9 so each
    // mapped page collapses to one 8-byte entry inside the self-map window.
    SVSM_PTE_BASE + ((usize::from(vaddr) & 0x0000_FFFF_FFFF_F000) >> 9)
}

/// Perform a virtual to physical translation using the self-map.
///
/// # Parameters
/// - `vaddr`: The virtual address to translate.
///
/// # Returns
/// `Some(PhysAddr)` if the virtual address is valid.
/// `None` if the virtual address is not valid.
pub fn virt_to_phys(vaddr: VirtAddr) -> Option<PhysAddr> {
    // Calculate the virtual addresses of each level of the paging
    // hierarchy in the self-map.
    let pte_addr = Self::get_pte_address(vaddr);
    let pde_addr = Self::get_pte_address(pte_addr);
    let pdpe_addr = Self::get_pte_address(pde_addr);
    let pml4e_addr = Self::get_pte_address(pdpe_addr);

    // Check each entry in the paging hierarchy to determine whether this
    // address is mapped. Because the hierarchy is read from the top
    // down using self-map addresses that were calculated correctly,
    // the reads are safe to perform.
    // SAFETY: pml4e_addr was derived from the self-map base above.
    let pml4e = unsafe { PTEntry::read_pte(pml4e_addr) };
    if !pml4e.present() {
        return None;
    }

    // There is no need to check for a large page in the PML4E because
    // the architecture does not support the large bit at the top-level
    // entry. If a large page is detected at a lower level of the
    // hierarchy, the low bits from the virtual address must be combined
    // with the physical address from the PDE/PDPE.
    // SAFETY: the PML4E is present, so the PDPE self-map address is mapped.
    let pdpe = unsafe { PTEntry::read_pte(pdpe_addr) };
    if !pdpe.present() {
        return None;
    }
    if pdpe.huge() {
        // 1 GiB page: combine the low 30 bits of the VA.
        return Some(pdpe.address() + (usize::from(vaddr) & 0x3FFF_FFFF));
    }

    // SAFETY: the PDPE is present and not huge, so the PDE is mapped.
    let pde = unsafe { PTEntry::read_pte(pde_addr) };
    if !pde.present() {
        return None;
    }
    if pde.huge() {
        // 2 MiB page: combine the low 21 bits of the VA.
        return Some(pde.address() + (usize::from(vaddr) & 0x001F_FFFF));
    }

    // SAFETY: the PDE is present and not huge, so the PTE is mapped.
    let pte = unsafe { PTEntry::read_pte(pte_addr) };
    if pte.present() {
        Some(pte.address() + (usize::from(vaddr) & 0xFFF))
    } else {
        None
    }
}

fn alloc_pte_lvl3(entry: &mut PTEntry, vaddr: VirtAddr, size: PageSize) -> Mapping<'_> {
let flags = entry.flags();

Expand All @@ -576,7 +674,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
entry.set(paddr, flags);
entry.set(make_private_address(paddr), flags);

let idx = Self::index::<2>(vaddr);
Self::alloc_pte_lvl2(&mut page[idx], vaddr, size)
Expand All @@ -597,7 +695,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
entry.set(paddr, flags);
entry.set(make_private_address(paddr), flags);

let idx = Self::index::<1>(vaddr);
Self::alloc_pte_lvl1(&mut page[idx], vaddr, size)
Expand All @@ -618,7 +716,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
entry.set(paddr, flags);
entry.set(make_private_address(paddr), flags);

let idx = Self::index::<0>(vaddr);
Mapping::Level0(&mut page[idx])
Expand Down Expand Up @@ -1031,9 +1129,7 @@ impl PageTable {
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
let entry = &mut self.root[idx];
// The C bit is not required here because all page table fetches are
// made as C=1.
entry.set(paddr, flags);
entry.set(make_private_address(paddr), flags);
}
}
}
Expand Down
Loading

0 comments on commit 0242978

Please sign in to comment.