From eb735917547adefb455df3e0713cb9b080575e85 Mon Sep 17 00:00:00 2001 From: Jonathan Klimt Date: Tue, 17 Sep 2024 11:29:56 +0200 Subject: [PATCH] Use GuestPhysAddress in the VM struct and the x86 paging initialization --- src/arch/x86_64/mod.rs | 22 +++++++------------ src/arch/x86_64/paging/mod.rs | 26 +++++++++++++---------- src/linux/gdb/section_offsets.rs | 2 +- src/linux/x86_64/kvm_cpu.rs | 26 +++++++++++------------ src/vm.rs | 36 +++++++++++++++++--------------- 5 files changed, 56 insertions(+), 56 deletions(-) diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs index 1ab0df4c..2ec2bde6 100644 --- a/src/arch/x86_64/mod.rs +++ b/src/arch/x86_64/mod.rs @@ -151,7 +151,7 @@ pub fn virt_to_phys( Ok(entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS))) } -pub fn init_guest_mem(mem: &mut [u8], guest_address: u64) { +pub fn init_guest_mem(mem: &mut [u8], guest_address: GuestPhysAddr) { // TODO: we should maybe return an error on failure (e.g., the memory is too small) paging::initialize_pagetables(mem, guest_address); } @@ -246,42 +246,36 @@ mod tests { #[test] fn test_virt_to_phys() { - let guest_address = 0x11111000; - let mem = MmapMemory::new( - 0, - MIN_PHYSMEM_SIZE * 2, - GuestPhysAddr::new(guest_address), - true, - true, - ); + let guest_address = GuestPhysAddr::new(0x11111000); + let mem = MmapMemory::new(0, MIN_PHYSMEM_SIZE * 2, guest_address, true, true); init_guest_mem( unsafe { mem.as_slice_mut() }.try_into().unwrap(), - guest_address + guest_address, ); // Get the address of the first entry in PML4 (the address of the PML4 itself) let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000); let p_addr = virt_to_phys(virt_addr, &mem).unwrap(); - assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PML4_OFFSET)); + assert_eq!(p_addr, guest_address + PML4_OFFSET); // The last entry on the PML4 is the address of the PML4 with flags let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000 | (4096 - 8)); let p_addr = virt_to_phys(virt_addr, 
&mem).unwrap(); assert_eq!( mem.read::<u64>(p_addr).unwrap(), - (guest_address + PML4_OFFSET) + (guest_address + PML4_OFFSET).as_u64() | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits() ); // the first entry on the 3rd level entry in the pagetables is the address of the boot pdpte let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFE00000); let p_addr = virt_to_phys(virt_addr, &mem).unwrap(); - assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PDPTE_OFFSET)); + assert_eq!(p_addr, guest_address + PDPTE_OFFSET); // the first entry on the 2rd level entry in the pagetables is the address of the boot pde let virt_addr = GuestVirtAddr::new(0xFFFFFFFFC0000000); let p_addr = virt_to_phys(virt_addr, &mem).unwrap(); - assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PDE_OFFSET)); + assert_eq!(p_addr, guest_address + PDE_OFFSET); // That address points to a huge page assert!( PageTableFlags::from_bits_truncate(mem.read::<u64>(p_addr).unwrap()).contains( diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs index 113d36bf..0e8a3040 100644 --- a/src/arch/x86_64/paging/mod.rs +++ b/src/arch/x86_64/paging/mod.rs @@ -11,8 +11,9 @@ use crate::consts::*; /// The memory slice must be larger than [`MIN_PHYSMEM_SIZE`]. /// Also, the memory `mem` needs to be zeroed for [`PAGE_SIZE`] bytes at the /// offsets [`BOOT_PML4`] and [`BOOT_PDPTE`], otherwise the integrity of the -/// pagetables and thus the integrity of the guest's memory is not ensured -pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) { +/// pagetables and thus the integrity of the guest's memory is not ensured. +/// `mem` and `guest_address` must be 2MiB page aligned. 
+pub fn initialize_pagetables(mem: &mut [u8], guest_address: GuestPhysAddr) { assert!(mem.len() >= MIN_PHYSMEM_SIZE); let mem_addr = std::ptr::addr_of_mut!(mem[0]); @@ -58,15 +59,15 @@ pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) { gdt_entry[BOOT_GDT_DATA] = create_gdt_entry(0xC093, 0, 0xFFFFF); pml4[0].set_addr( - GuestPhysAddr::new(guest_address + PDPTE_OFFSET), + guest_address + PDPTE_OFFSET, PageTableFlags::PRESENT | PageTableFlags::WRITABLE, ); pml4[511].set_addr( - GuestPhysAddr::new(guest_address + PML4_OFFSET), + guest_address + PML4_OFFSET, PageTableFlags::PRESENT | PageTableFlags::WRITABLE, ); pdpte[0].set_addr( - GuestPhysAddr::new(guest_address + PDE_OFFSET), + guest_address + PDE_OFFSET, PageTableFlags::PRESENT | PageTableFlags::WRITABLE, ); @@ -95,29 +96,32 @@ mod tests { #[test] fn test_pagetable_initialization() { - let guest_address = 0x15000; + let guest_address = GuestPhysAddr::new(0x20_0000); let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE]; // This will return a pagetable setup that we will check. 
- initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(), guest_address); + initialize_pagetables( + (&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(), + guest_address, + ); // Check PDPTE address - let addr_pdpte = u64::from_le_bytes( + let addr_pdpte = GuestPhysAddr::new(u64::from_le_bytes( mem[(PML4_OFFSET as usize)..(PML4_OFFSET as usize + 8)] .try_into() .unwrap(), - ); + )); assert_eq!( addr_pdpte - guest_address, PDPTE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits() ); // Check PDE - let addr_pde = u64::from_le_bytes( + let addr_pde = GuestPhysAddr::new(u64::from_le_bytes( mem[(PDPTE_OFFSET as usize)..(PDPTE_OFFSET as usize + 8)] .try_into() .unwrap(), - ); + )); assert_eq!( addr_pde - guest_address, PDE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits() diff --git a/src/linux/gdb/section_offsets.rs b/src/linux/gdb/section_offsets.rs index 9bf88b9f..05914300 100644 --- a/src/linux/gdb/section_offsets.rs +++ b/src/linux/gdb/section_offsets.rs @@ -4,7 +4,7 @@ use super::GdbUhyve; impl target::ext::section_offsets::SectionOffsets for GdbUhyve { fn get_section_offsets(&mut self) -> Result<Offsets<u64>, Self::Error> { - let offset = self.vm.get_offset(); + let offset = self.vm.kernel_start_addr().as_u64(); Ok(Offsets::Sections { text: offset, data: offset, diff --git a/src/linux/x86_64/kvm_cpu.rs b/src/linux/x86_64/kvm_cpu.rs index 53e36a8f..166300b2 100644 --- a/src/linux/x86_64/kvm_cpu.rs +++ b/src/linux/x86_64/kvm_cpu.rs @@ -56,9 +56,9 @@ impl VirtualizationBackend for KvmVm { }, }; kvcpu.init( - parent_vm.get_entry_point(), + parent_vm.entry_point(), parent_vm.stack_address(), - parent_vm.guest_address(), + parent_vm.memory_start(), id, )?; @@ -265,9 +265,9 @@ impl KvmCpu { fn setup_long_mode( &self, - entry_point: u64, - stack_address: u64, - guest_address: u64, + entry_point: GuestPhysAddr, + stack_address: GuestPhysAddr, + guest_address: GuestPhysAddr, cpu_id: u32, ) -> Result<(), kvm_ioctls::Error> { 
//debug!("Setup long mode"); @@ -280,7 +280,7 @@ impl KvmCpu { | Cr0Flags::PAGING; sregs.cr0 = cr0.bits(); - sregs.cr3 = guest_address + PML4_OFFSET; + sregs.cr3 = guest_address.as_u64() + PML4_OFFSET; let cr4 = Cr4Flags::PHYSICAL_ADDRESS_EXTENSION; sregs.cr4 = cr4.bits(); @@ -311,17 +311,17 @@ impl KvmCpu { sregs.ss = seg; //sregs.fs = seg; //sregs.gs = seg; - sregs.gdt.base = guest_address + GDT_OFFSET; + sregs.gdt.base = guest_address.as_u64() + GDT_OFFSET; sregs.gdt.limit = ((std::mem::size_of::<u64>() * BOOT_GDT_MAX) - 1) as u16; self.vcpu.set_sregs(&sregs)?; let mut regs = self.vcpu.get_regs()?; regs.rflags = 2; - regs.rip = entry_point; - regs.rdi = guest_address + BOOT_INFO_OFFSET; + regs.rip = entry_point.as_u64(); + regs.rdi = guest_address.as_u64() + BOOT_INFO_OFFSET; regs.rsi = cpu_id.into(); - regs.rsp = stack_address; + regs.rsp = stack_address.as_u64(); self.vcpu.set_regs(&regs)?; @@ -342,9 +342,9 @@ impl KvmCpu { fn init( &mut self, - entry_point: u64, - stack_address: u64, - guest_address: u64, + entry_point: GuestPhysAddr, + stack_address: GuestPhysAddr, + guest_address: GuestPhysAddr, cpu_id: u32, ) -> HypervisorResult<()> { self.setup_long_mode(entry_point, stack_address, guest_address, cpu_id)?; diff --git a/src/vm.rs b/src/vm.rs index 5c971cbe..f7265143 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -172,9 +172,9 @@ pub struct UhyveVm { /// The starting position of the image in physical memory - offset: u64, - entry_point: u64, - stack_address: u64, + kernel_address: GuestPhysAddr, + entry_point: GuestPhysAddr, + stack_address: GuestPhysAddr, guest_address: GuestPhysAddr, pub mem: Arc<MmapMemory>, path: PathBuf, @@ -269,9 +269,9 @@ impl UhyveVm { }; let mut vm = Self { - offset, - entry_point: 0, - stack_address: 0, + kernel_address: GuestPhysAddr::new(offset), + entry_point: GuestPhysAddr::new(0), + stack_address: GuestPhysAddr::new(0), guest_address, mem: mem.into(), path: kernel_path, @@ -306,20 +306,20 @@ impl UhyveVm { } /// Returns the 
section offsets relative to their base addresses - pub fn get_offset(&self) -> u64 { - self.offset + pub fn kernel_start_addr(&self) -> GuestPhysAddr { + self.kernel_address } - pub fn get_entry_point(&self) -> u64 { + pub fn entry_point(&self) -> GuestPhysAddr { self.entry_point } - pub fn stack_address(&self) -> u64 { + pub fn stack_address(&self) -> GuestPhysAddr { self.stack_address } - pub fn guest_address(&self) -> u64 { - self.guest_address.as_u64() + pub fn memory_start(&self) -> GuestPhysAddr { + self.guest_address } /// Returns the number of cores for the vm. @@ -346,7 +346,7 @@ impl UhyveVm { unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call .try_into() .expect("Guest memory is not large enough for pagetables"), - self.mem.guest_address.as_u64(), + self.mem.guest_address, ); } @@ -355,7 +355,7 @@ impl UhyveVm { let elf = fs::read(self.kernel_path())?; let object = KernelObject::parse(&elf).map_err(LoadKernelError::ParseKernelError)?; - let kernel_end_address = self.offset as usize + object.mem_size(); + let kernel_end_address = self.kernel_address.as_u64() as usize + object.mem_size(); if kernel_end_address > self.mem.memory_size - self.mem.guest_address.as_u64() as usize { return Err(LoadKernelError::InsufficientMemory); @@ -368,9 +368,9 @@ impl UhyveVm { // Safety: Slice only lives during this fn call, so no aliasing happens &mut unsafe { self.mem.as_slice_uninit_mut() } [KERNEL_OFFSET as usize..object.mem_size() + KERNEL_OFFSET as usize], - self.offset, + self.kernel_address.as_u64(), ); - self.entry_point = entry_point; + self.entry_point = GuestPhysAddr::new(entry_point); let sep = self .args() @@ -421,9 +421,11 @@ impl UhyveVm { self.boot_info = raw_boot_info_ptr; } - self.stack_address = (self.offset).checked_sub(KERNEL_STACK_SIZE).expect( + assert!( + self.kernel_address.as_u64() > KERNEL_STACK_SIZE, "there should be enough space for the boot stack before the kernel start address", ); + self.stack_address = 
self.kernel_address - KERNEL_STACK_SIZE; Ok(()) }