diff --git a/src/arch/aarch64/mod.rs b/src/arch/aarch64/mod.rs
index 8576a8b2..f0d02cee 100644
--- a/src/arch/aarch64/mod.rs
+++ b/src/arch/aarch64/mod.rs
@@ -120,6 +120,8 @@ pub fn virt_to_phys(
 		return Err(PagetableError::InvalidAddress);
 	}
 
+	let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
+
 	// Assumptions:
 	// - We use 4KiB granule
 	// - We use maximum VA length
@@ -132,7 +134,7 @@ pub fn virt_to_phys(
 	// - We are page_aligned, and thus also PageTableEntry aligned.
 	let mut pagetable: &[PageTableEntry] = unsafe {
 		std::mem::transmute::<&[u8], &[PageTableEntry]>(
-			mem.slice_at(mem.guest_address, PAGE_SIZE).unwrap(),
+			mem.slice_at(guest_address, PAGE_SIZE).unwrap(),
 		)
 	};
 	// TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
@@ -154,7 +156,7 @@ pub fn virt_to_phys(
 	Ok(pte.address())
 }
 
-pub fn init_guest_mem(mem: &mut [u8], _guest_address: u64) {
+pub fn init_guest_mem(mem: &mut [u8]) {
 	let mem_addr = std::ptr::addr_of_mut!(mem[0]);
 	assert!(mem.len() >= PGT_OFFSET as usize + 512 * size_of::<u64>());
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index 4d0a13d5..4b0f3061 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -121,8 +121,9 @@ pub fn virt_to_phys(
 	/// Number of bits of the index in each table (PML4, PDPT, PDT, PGT).
 	pub const PAGE_MAP_BITS: usize = 9;
 
+	let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
 	let mut page_table = unsafe {
-		(mem.host_address(GuestPhysAddr::new(mem.guest_address.as_u64() + PML4_OFFSET))
+		(mem.host_address(GuestPhysAddr::new(guest_address + PML4_OFFSET))
 			.unwrap() as *mut PageTable)
 			.as_mut()
 	}
@@ -153,9 +154,9 @@ pub fn virt_to_phys(
 	Ok(entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS)))
 }
 
-pub fn init_guest_mem(mem: &mut [u8], guest_address: u64) {
+pub fn init_guest_mem(mem: &mut [u8]) {
 	// TODO: we should maybe return an error on failure (e.g., the memory is too small)
-	paging::initialize_pagetables(mem, guest_address);
+	paging::initialize_pagetables(mem);
 }
 
 #[cfg(test)]
@@ -256,7 +257,6 @@ mod tests {
 		);
 		init_guest_mem(
 			unsafe { mem.as_slice_mut() }.try_into().unwrap(),
-			guest_address,
 		);
 
 		// Get the address of the first entry in PML4 (the address of the PML4 itself)
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index 1502b1ad..2f00d9f2 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -12,9 +12,10 @@ use crate::consts::*;
 /// Also, the memory `mem` needs to be zeroed for [`PAGE_SIZE`] bytes at the
 /// offsets [`BOOT_PML4`] and [`BOOT_PDPTE`], otherwise the integrity of the
 /// pagetables and thus the integrity of the guest's memory is not ensured
-pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) {
+pub fn initialize_pagetables(mem: &mut [u8]) {
 	assert!(mem.len() >= MIN_PHYSMEM_SIZE);
 	let mem_addr = std::ptr::addr_of_mut!(mem[0]);
+	let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
 
 	let (gdt_entry, pml4, pdpte, pde);
 	// Safety:
@@ -99,10 +100,7 @@ mod tests {
 		let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE];
 		// This will return a pagetable setup that we will check.
-		initialize_pagetables(
-			(&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(),
-			guest_address,
-		);
+		initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap());
 
 		// Check PDPTE address
 		let addr_pdpte = u64::from_le_bytes(
diff --git a/src/linux/x86_64/kvm_cpu.rs b/src/linux/x86_64/kvm_cpu.rs
index a1285a64..7e9b7212 100644
--- a/src/linux/x86_64/kvm_cpu.rs
+++ b/src/linux/x86_64/kvm_cpu.rs
@@ -37,7 +37,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
 		slot: 0,
 		flags: mem.flags,
 		memory_size: sz as u64,
-		guest_phys_addr: mem.guest_address.as_u64(),
+		guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64(),
 		userspace_addr: mem.host_address as u64,
 	};
 
@@ -50,7 +50,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
 		slot: 1,
 		flags: mem.flags,
 		memory_size: (mem.memory_size - KVM_32BIT_GAP_START - KVM_32BIT_GAP_SIZE) as u64,
-		guest_phys_addr: mem.guest_address.as_u64()
+		guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64()
 			+ (KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE) as u64,
 		userspace_addr: (mem.host_address as usize + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
 			as u64,
diff --git a/src/macos/x86_64/vcpu.rs b/src/macos/x86_64/vcpu.rs
index 38507ed3..41040619 100644
--- a/src/macos/x86_64/vcpu.rs
+++ b/src/macos/x86_64/vcpu.rs
@@ -648,7 +648,7 @@ impl VirtualCPU for XhyveCpu {
 		vcpu.init(
 			parent_vm.get_entry_point(),
 			parent_vm.stack_address(),
-			parent_vm.guest_address(),
+			(*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64(),
 			id,
 		)?;
 
diff --git a/src/vm.rs b/src/vm.rs
index 1a9bfd48..f623a4dd 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -5,7 +5,7 @@ use std::{
 	num::NonZeroU32,
 	path::PathBuf,
 	ptr,
-	sync::{Arc, Mutex},
+	sync::{Arc, Mutex, OnceLock},
 	time::SystemTime,
 };
 
@@ -15,6 +15,7 @@ use hermit_entry::{
 };
 use log::{error, warn};
 use thiserror::Error;
+use uhyve_interface::GuestPhysAddr;
 
 #[cfg(target_arch = "x86_64")]
 use crate::arch::x86_64::{
@@ -29,6 +30,8 @@ use crate::{
 
 pub type HypervisorResult<T> = Result<T, HypervisorError>;
 
+pub static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();
+
 #[derive(Error, Debug)]
 pub enum LoadKernelError {
 	#[error(transparent)]
@@ -75,7 +78,7 @@ pub struct UhyveVm {
 	offset: u64,
 	entry_point: u64,
 	stack_address: u64,
-	guest_address: u64,
+	guest_address: GuestPhysAddr,
 	pub mem: Arc<MmapMemory>,
 	num_cpus: u32,
 	path: PathBuf,
@@ -90,21 +93,40 @@ pub struct UhyveVm {
 impl UhyveVm {
 	pub fn new(kernel_path: PathBuf, params: Params) -> HypervisorResult<UhyveVm> {
 		let memory_size = params.memory_size.get();
+		let guest_address = *GUEST_ADDRESS.get_or_init(|| arch::RAM_START);
 
 		// TODO: Move functionality to load_kernel. We don't know whether the binaries are relocatable yet.
 		// TODO: Use random address instead of arch::RAM_START here.
 		#[cfg(target_os = "linux")]
 		#[cfg(target_arch = "x86_64")]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			params.thp,
+			params.ksm,
+		);
 
 		// TODO: guest_address is only taken into account on Linux platforms.
 		// TODO: Before changing this, fix init_guest_mem in `src/arch/aarch64/mod.rs`
 		#[cfg(target_os = "linux")]
 		#[cfg(not(target_arch = "x86_64"))]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			params.thp,
+			params.ksm,
+		);
 
 		#[cfg(not(target_os = "linux"))]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, false, false);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			false,
+			false,
+		);
 
 		// create virtio interface
 		// TODO: Remove allow once fixed:
@@ -130,7 +152,7 @@ impl UhyveVm {
 			offset: 0,
 			entry_point: 0,
 			stack_address: 0,
-			guest_address: mem.guest_address.as_u64(),
+			guest_address,
 			mem: mem.into(),
 			num_cpus: cpu_count,
 			path: kernel_path,
@@ -165,7 +187,7 @@ impl UhyveVm {
 	}
 
 	pub fn guest_address(&self) -> u64 {
-		self.guest_address
+		self.guest_address.as_u64()
 	}
 
 	/// Returns the number of cores for the vm.
@@ -188,7 +210,6 @@ impl UhyveVm {
 			unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call
 				.try_into()
 				.expect("Guest memory is not large enough for pagetables"),
-			self.guest_address,
 		);
 	}
 
@@ -201,7 +222,7 @@ impl UhyveVm {
 		// TODO: should be a random start address, if we have a relocatable executable
 		let kernel_start_address = object
 			.start_addr()
-			.unwrap_or(self.mem.guest_address.as_u64() + kernel_offset as u64)
+			.unwrap_or_else(|| self.mem.guest_address.as_u64() + kernel_offset as u64)
 			as usize;
 		let kernel_end_address = kernel_start_address + object.mem_size();
 		self.offset = kernel_start_address as u64;
@@ -261,7 +282,7 @@ impl fmt::Debug for UhyveVm {
 		f.debug_struct("UhyveVm")
 			.field("entry_point", &self.entry_point)
 			.field("stack_address", &self.stack_address)
-			.field("guest_address", &self.guest_address)
+			.field("guest_address", &self.guest_address.as_u64())
 			.field("mem", &self.mem)
 			.field("num_cpus", &self.num_cpus)
 			.field("path", &self.path)
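
Note: the diff above routes every read of the guest's base address through a process-wide `OnceLock`. The sketch below is a minimal, self-contained illustration of that pattern, not uhyve code: the `GuestPhysAddr` type here is a simplified stand-in for `uhyve_interface::GuestPhysAddr`, and the addresses are arbitrary. It shows why the call sites can drop their `guest_address` parameters: `get_or_init` stores the first value it is given and returns that same value to every later caller.

use std::sync::OnceLock;

/// Simplified stand-in for `uhyve_interface::GuestPhysAddr` (illustration only).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct GuestPhysAddr(u64);

impl GuestPhysAddr {
    const fn new(addr: u64) -> Self {
        Self(addr)
    }

    fn as_u64(&self) -> u64 {
        self.0
    }
}

/// Written once during VM construction, read from any call site afterwards.
static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();

fn main() {
    // The first initializer wins: `get_or_init` stores the value exactly once.
    let addr = *GUEST_ADDRESS.get_or_init(|| GuestPhysAddr::new(0x10_0000));
    assert_eq!(addr.as_u64(), 0x10_0000);

    // A later initializer is ignored; every caller observes the first value.
    let same = *GUEST_ADDRESS.get_or_init(|| GuestPhysAddr::new(0xdead_beef));
    assert_eq!(same, addr);

    // `get` returns `None` before initialization and `Some(&value)` afterwards,
    // which is why the diff's call sites may unwrap it only after `UhyveVm::new`
    // has run.
    assert_eq!(GUEST_ADDRESS.get().copied(), Some(addr));
}

One consequence of this design worth noting in review: because `GUEST_ADDRESS` is global and write-once, the address can no longer differ between two `UhyveVm` instances in the same process, and tests that initialize it must agree on a single value.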