Skip to content

Commit

Permalink
ASLR: Lay down the foundations
Browse files Browse the repository at this point in the history
- Tests: x86_64-related paging tests should use a guest_address that is
  not 0
- Tests: Move them in separate files, use appropriate 'use' directives
- Tests: Use init_guest_mem wrapper in test_virt_to_phys
- Fix kernel memory loading
- Add guest_address getter in UhyveVm
- Change names of constants to clarify their purpose
- Use u64 for arch::RAM_START instead of GuestVirtAddr
- Remove pagetable_l0 from virt_to_phys function
- Various `cargo fmt`-related changes
- aarch64: Blindly replace constant names and similar RAM_START change

We currently rely on guest_address in MmapMemory to calculate the
offsets during the initialization of the VM and when converting
virtual addresses to physical addresses. The latter case is intended
to be temporary - we should read the value from the CR3 register at
a later point, but this is too complex for the time being because of
the different architectures.

Although this current revision does work with relocatable binaries, it
is not making use of this functionality _just_ yet.

Fixes hermit-os#719.

Co-authored-by: Jonathan <[email protected]>
  • Loading branch information
n0toose and jounathaen committed Dec 3, 2024
1 parent b094c9a commit a6c55b2
Show file tree
Hide file tree
Showing 12 changed files with 175 additions and 123 deletions.
45 changes: 22 additions & 23 deletions src/arch/aarch64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use bitflags::bitflags;
use uhyve_interface::{GuestPhysAddr, GuestVirtAddr};

use crate::{
consts::{BOOT_INFO_ADDR, BOOT_PGT},
consts::{BOOT_INFO_OFFSET, PGT_OFFSET},
mem::MmapMemory,
paging::PagetableError,
};
Expand Down Expand Up @@ -115,7 +115,6 @@ fn is_valid_address(virtual_address: GuestVirtAddr) -> bool {
pub fn virt_to_phys(
addr: GuestVirtAddr,
mem: &MmapMemory,
pagetable_l0: GuestPhysAddr,
) -> Result<GuestPhysAddr, PagetableError> {
if !is_valid_address(addr) {
return Err(PagetableError::InvalidAddress);
Expand All @@ -133,7 +132,7 @@ pub fn virt_to_phys(
// - We are page_aligned, and thus also PageTableEntry aligned.
let mut pagetable: &[PageTableEntry] = unsafe {
std::mem::transmute::<&[u8], &[PageTableEntry]>(
mem.slice_at(pagetable_l0, PAGE_SIZE).unwrap(),
mem.slice_at(mem.guest_address, PAGE_SIZE).unwrap(),
)
};
// TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
Expand All @@ -155,67 +154,67 @@ pub fn virt_to_phys(
Ok(pte.address())
}

pub fn init_guest_mem(mem: &mut [u8]) {
pub fn init_guest_mem(mem: &mut [u8], _guest_address: GuestPhysAddr) {
let mem_addr = std::ptr::addr_of_mut!(mem[0]);

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(mem_addr.offset(BOOT_PGT.as_u64() as isize) as *mut u64, 512)
std::slice::from_raw_parts_mut(mem_addr.offset(PGT_OFFSET as isize) as *mut u64, 512)
};
pgt_slice.fill(0);
pgt_slice[0] = BOOT_PGT.as_u64() + 0x1000 + PT_PT;
pgt_slice[511] = BOOT_PGT.as_u64() + PT_PT + PT_SELF;
pgt_slice[0] = PGT_OFFSET + 0x1000 + PT_PT;
pgt_slice[511] = PGT_OFFSET + PT_PT + PT_SELF;

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x1000 + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 0x1000 + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(
mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x1000) as *mut u64,
mem_addr.offset(PGT_OFFSET as isize + 0x1000) as *mut u64,
512,
)
};
pgt_slice.fill(0);
pgt_slice[0] = BOOT_PGT.as_u64() + 0x2000 + PT_PT;
pgt_slice[0] = PGT_OFFSET + 0x2000 + PT_PT;

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x2000 + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 0x2000 + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(
mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x2000) as *mut u64,
mem_addr.offset(PGT_OFFSET as isize + 0x2000) as *mut u64,
512,
)
};
pgt_slice.fill(0);
pgt_slice[0] = BOOT_PGT.as_u64() + 0x3000 + PT_PT;
pgt_slice[1] = BOOT_PGT.as_u64() + 0x4000 + PT_PT;
pgt_slice[2] = BOOT_PGT.as_u64() + 0x5000 + PT_PT;
pgt_slice[0] = PGT_OFFSET + 0x3000 + PT_PT;
pgt_slice[1] = PGT_OFFSET + 0x4000 + PT_PT;
pgt_slice[2] = PGT_OFFSET + 0x5000 + PT_PT;

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x3000 + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 0x3000 + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(
mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x3000) as *mut u64,
mem_addr.offset(PGT_OFFSET as isize + 0x3000) as *mut u64,
512,
)
};
pgt_slice.fill(0);
// map Uhyve ports into the virtual address space
pgt_slice[0] = PT_MEM_CD;
// map BootInfo into the virtual address space
pgt_slice[BOOT_INFO_ADDR.as_u64() as usize / PAGE_SIZE] = BOOT_INFO_ADDR.as_u64() + PT_MEM;
pgt_slice[BOOT_INFO_OFFSET as usize / PAGE_SIZE] = BOOT_INFO_OFFSET + PT_MEM;

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x4000 + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 0x4000 + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(
mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x4000) as *mut u64,
mem_addr.offset(PGT_OFFSET as isize + 0x4000) as *mut u64,
512,
)
};
for (idx, i) in pgt_slice.iter_mut().enumerate() {
*i = 0x200000u64 + (idx * PAGE_SIZE) as u64 + PT_MEM;
}

assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x5000 + 512 * size_of::<u64>());
assert!(mem.len() >= PGT_OFFSET as usize + 0x5000 + 512 * size_of::<u64>());
let pgt_slice = unsafe {
std::slice::from_raw_parts_mut(
mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x5000) as *mut u64,
mem_addr.offset(PGT_OFFSET as isize + 0x5000) as *mut u64,
512,
)
};
Expand Down
49 changes: 28 additions & 21 deletions src/arch/x86_64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,10 @@ use x86_64::structures::paging::{
PageTable, PageTableIndex,
};

use crate::{arch::paging::initialize_pagetables, mem::MmapMemory, paging::PagetableError};
use crate::{consts::PML4_OFFSET, mem::MmapMemory, paging::PagetableError};

pub const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);

const MHZ_TO_HZ: u64 = 1000000;
const KHZ_TO_HZ: u64 = 1000;

Expand Down Expand Up @@ -110,16 +111,19 @@ pub fn get_cpu_frequency_from_os() -> std::result::Result<u32, FrequencyDetectio
pub fn virt_to_phys(
addr: GuestVirtAddr,
mem: &MmapMemory,
pagetable_l0: GuestPhysAddr,
) -> Result<GuestPhysAddr, PagetableError> {
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
pub const PAGE_BITS: u64 = 12;

/// Number of bits of the index in each table (PML4, PDPT, PDT, PGT).
pub const PAGE_MAP_BITS: usize = 9;

let mut page_table =
unsafe { (mem.host_address(pagetable_l0).unwrap() as *mut PageTable).as_mut() }.unwrap();
let mut page_table = unsafe {
(mem.host_address(GuestPhysAddr::new(mem.guest_address.as_u64() + PML4_OFFSET))
.unwrap() as *mut PageTable)
.as_mut()
}
.unwrap();
let mut page_bits = 39;
let mut entry = PageTableEntry::new();

Expand Down Expand Up @@ -147,20 +151,17 @@ pub fn virt_to_phys(
Ok((entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS))).into())
}

pub fn init_guest_mem(mem: &mut [u8]) {
pub fn init_guest_mem(mem: &mut [u8], guest_address: GuestPhysAddr) {
// TODO: we should maybe return an error on failure (e.g., the memory is too small)
initialize_pagetables(mem);
paging::initialize_pagetables(mem, guest_address);
}

#[cfg(test)]
mod tests {
use x86_64::structures::paging::PageTableFlags;

use super::*;
use crate::{
arch::paging::MIN_PHYSMEM_SIZE,
consts::{BOOT_PDE, BOOT_PDPTE, BOOT_PML4},
};
use crate::consts::{MIN_PHYSMEM_SIZE, PDE_OFFSET, PDPTE_OFFSET, PML4_OFFSET};

// test is derived from
// https://github.com/gz/rust-cpuid/blob/master/examples/tsc_frequency.rs
Expand Down Expand Up @@ -250,38 +251,44 @@ mod tests {
.is_test(true)
.try_init();

let guest_address = GuestPhysAddr::new(0x11111000);
let mem = MmapMemory::new(
0,
align_up!(MIN_PHYSMEM_SIZE * 2, 0x20_0000),
GuestPhysAddr::zero(),
MIN_PHYSMEM_SIZE * 2,
guest_address,
true,
true,
);
println!("mmap memory created {mem:?}");
initialize_pagetables(unsafe { mem.as_slice_mut() }.try_into().unwrap());

init_guest_mem(
unsafe { mem.as_slice_mut() }.try_into().unwrap(),
guest_address,
);

// Get the address of the first entry in PML4 (the address of the PML4 itself)
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000);
let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
assert_eq!(p_addr, BOOT_PML4);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, guest_address + PML4_OFFSET);

// The last entry on the PML4 is the address of the PML4 with flags
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000 | (4096 - 8));
let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(
mem.read::<u64>(p_addr).unwrap(),
BOOT_PML4.as_u64() | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
(guest_address + PML4_OFFSET).as_u64()
| (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
);

	// the first entry of the 3rd level of the pagetables is the address of the boot pdpte
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFE00000);
let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
assert_eq!(p_addr, BOOT_PDPTE);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, guest_address + PDPTE_OFFSET);

	// the first entry of the 2nd level of the pagetables is the address of the boot pde
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFC0000000);
let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
assert_eq!(p_addr, BOOT_PDE);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, guest_address + PDE_OFFSET);
// That address points to a huge page
assert!(
PageTableFlags::from_bits_truncate(mem.read::<u64>(p_addr).unwrap()).contains(
Expand Down
63 changes: 36 additions & 27 deletions src/arch/x86_64/paging/mod.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use uhyve_interface::GuestPhysAddr;
use x86_64::{
structures::paging::{Page, PageTable, PageTableFlags, Size2MiB},
PhysAddr,
Expand All @@ -14,15 +15,14 @@ pub fn create_gdt_entry(flags: u64, base: u64, limit: u64) -> u64 {
| (limit & 0x0000ffffu64)
}

pub const MIN_PHYSMEM_SIZE: usize = BOOT_PDE.as_u64() as usize + 0x1000;

/// Creates the pagetables and the GDT in the guest memory space.
///
/// The memory slice must be larger than [`MIN_PHYSMEM_SIZE`].
/// Also, the memory `mem` needs to be zeroed for [`PAGE_SIZE`] bytes at the
/// offsets [`BOOT_PML4`] and [`BOOT_PDPTE`], otherwise the integrity of the
/// pagetables and thus the integrity of the guest's memory is not ensured
pub fn initialize_pagetables(mem: &mut [u8]) {
/// `mem` and `GuestPhysAddr` must be 2MiB page aligned.
pub fn initialize_pagetables(mem: &mut [u8], guest_address: GuestPhysAddr) {
assert!(mem.len() >= MIN_PHYSMEM_SIZE);
let mem_addr = std::ptr::addr_of_mut!(mem[0]);

Expand All @@ -32,23 +32,23 @@ pub fn initialize_pagetables(mem: &mut [u8]) {
// these and it is asserted to be large enough.
unsafe {
gdt_entry = mem_addr
.add(BOOT_GDT.as_u64() as usize)
.add(GDT_OFFSET as usize)
.cast::<[u64; 3]>()
.as_mut()
.unwrap();

pml4 = mem_addr
.add(BOOT_PML4.as_u64() as usize)
.add(PML4_OFFSET as usize)
.cast::<PageTable>()
.as_mut()
.unwrap();
pdpte = mem_addr
.add(BOOT_PDPTE.as_u64() as usize)
.add(PDPTE_OFFSET as usize)
.cast::<PageTable>()
.as_mut()
.unwrap();
pde = mem_addr
.add(BOOT_PDE.as_u64() as usize)
.add(PDE_OFFSET as usize)
.cast::<PageTable>()
.as_mut()
.unwrap();
Expand All @@ -68,15 +68,15 @@ pub fn initialize_pagetables(mem: &mut [u8]) {
gdt_entry[BOOT_GDT_DATA] = create_gdt_entry(0xC093, 0, 0xFFFFF);

pml4[0].set_addr(
BOOT_PDPTE.into(),
(guest_address + PDPTE_OFFSET).into(),
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);
pml4[511].set_addr(
BOOT_PML4.into(),
(guest_address + PML4_OFFSET).into(),
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);
pdpte[0].set_addr(
BOOT_PDE.into(),
(guest_address + PDE_OFFSET).into(),
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);

Expand Down Expand Up @@ -114,7 +114,7 @@ fn pretty_print_pagetable(pt: &PageTable) {
mod tests {
use super::*;
use crate::{
consts::{BOOT_PDE, BOOT_PDPTE, BOOT_PML4},
consts::{GDT_OFFSET, PDE_OFFSET, PDPTE_OFFSET, PML4_OFFSET},
mem::HugePageAlignedMem,
};

Expand All @@ -124,33 +124,42 @@ mod tests {
.filter(None, log::LevelFilter::Debug)
.is_test(true)
.try_init();
let guest_address = GuestPhysAddr::new(0x20000);

let aligned_mem = HugePageAlignedMem::<MIN_PHYSMEM_SIZE>::new();
initialize_pagetables((aligned_mem.mem).try_into().unwrap());
// This will return a pagetable setup that we will check.
initialize_pagetables((aligned_mem.mem).try_into().unwrap(), guest_address);

// Test pagetable setup
let addr_pdpte = u64::from_le_bytes(
aligned_mem.mem[(BOOT_PML4.as_u64() as usize)..(BOOT_PML4.as_u64() as usize + 8)]
// Check PDPTE address
let addr_pdpte = GuestPhysAddr::new(u64::from_le_bytes(
aligned_mem.mem[(PML4_OFFSET as usize)..(PML4_OFFSET as usize + 8)]
.try_into()
.unwrap(),
);
));
assert_eq!(
addr_pdpte,
BOOT_PDPTE.as_u64() | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
addr_pdpte - guest_address,
PDPTE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
);
let addr_pde = u64::from_le_bytes(
aligned_mem.mem[(BOOT_PDPTE.as_u64() as usize)..(BOOT_PDPTE.as_u64() as usize + 8)]

// Check PDE
let addr_pde = GuestPhysAddr::new(u64::from_le_bytes(
aligned_mem.mem[(PDPTE_OFFSET as usize)..(PDPTE_OFFSET as usize + 8)]
.try_into()
.unwrap(),
);
));
assert_eq!(
addr_pde,
BOOT_PDE.as_u64() | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
addr_pde - guest_address,
PDE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
);

// Check PDE's pagetable bits
for i in (0..4096).step_by(8) {
let addr = BOOT_PDE.as_u64() as usize + i;
let entry = u64::from_le_bytes(aligned_mem.mem[addr..(addr + 8)].try_into().unwrap());
let pde_addr = (PDE_OFFSET) as usize + i;
let entry = u64::from_le_bytes(
aligned_mem.mem[pde_addr..(pde_addr + 8)]
.try_into()
.unwrap(),
);
assert!(
PageTableFlags::from_bits_truncate(entry)
.difference(
Expand All @@ -159,14 +168,14 @@ mod tests {
| PageTableFlags::HUGE_PAGE
)
.is_empty(),
"Pagetable bits at {addr:#x} are incorrect"
"Pagetable bits at {pde_addr:#x} are incorrect"
)
}

// Test GDT
let gdt_results = [0x0, 0xAF9B000000FFFF, 0xCF93000000FFFF];
for (i, res) in gdt_results.iter().enumerate() {
let gdt_addr = BOOT_GDT.as_u64() as usize + i * 8;
let gdt_addr = GDT_OFFSET as usize + i * 8;
let gdt_entry =
u64::from_le_bytes(aligned_mem.mem[gdt_addr..gdt_addr + 8].try_into().unwrap());
assert_eq!(*res, gdt_entry);
Expand Down
Loading

0 comments on commit a6c55b2

Please sign in to comment.