Skip to content

Commit

Permalink
Use GuestPhysAddress in the VM struct and the x86 paging initialization
Browse files Browse the repository at this point in the history
  • Loading branch information
jounathaen committed Dec 2, 2024
1 parent 8b2d14b commit eb73591
Show file tree
Hide file tree
Showing 5 changed files with 56 additions and 56 deletions.
22 changes: 8 additions & 14 deletions src/arch/x86_64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ pub fn virt_to_phys(
Ok(entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS)))
}

pub fn init_guest_mem(mem: &mut [u8], guest_address: u64) {
pub fn init_guest_mem(mem: &mut [u8], guest_address: GuestPhysAddr) {
// TODO: we should maybe return an error on failure (e.g., the memory is too small)
paging::initialize_pagetables(mem, guest_address);
}
Expand Down Expand Up @@ -246,42 +246,36 @@ mod tests {

#[test]
fn test_virt_to_phys() {
let guest_address = 0x11111000;
let mem = MmapMemory::new(
0,
MIN_PHYSMEM_SIZE * 2,
GuestPhysAddr::new(guest_address),
true,
true,
);
let guest_address = GuestPhysAddr::new(0x11111000);
let mem = MmapMemory::new(0, MIN_PHYSMEM_SIZE * 2, guest_address, true, true);
init_guest_mem(
unsafe { mem.as_slice_mut() }.try_into().unwrap(),
guest_address
guest_address,
);

// Get the address of the first entry in PML4 (the address of the PML4 itself)
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PML4_OFFSET));
assert_eq!(p_addr, guest_address + PML4_OFFSET);

// The last entry on the PML4 is the address of the PML4 with flags
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000 | (4096 - 8));
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(
mem.read::<u64>(p_addr).unwrap(),
(guest_address + PML4_OFFSET)
(guest_address + PML4_OFFSET).as_u64()
| (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
);

// the first entry on the 3rd level of the pagetables is the address of the boot pdpte
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFE00000);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PDPTE_OFFSET));
assert_eq!(p_addr, guest_address + PDPTE_OFFSET);

// the first entry on the 2nd level of the pagetables is the address of the boot pde
let virt_addr = GuestVirtAddr::new(0xFFFFFFFFC0000000);
let p_addr = virt_to_phys(virt_addr, &mem).unwrap();
assert_eq!(p_addr, GuestPhysAddr::new(guest_address + PDE_OFFSET));
assert_eq!(p_addr, guest_address + PDE_OFFSET);
// That address points to a huge page
assert!(
PageTableFlags::from_bits_truncate(mem.read::<u64>(p_addr).unwrap()).contains(
Expand Down
26 changes: 15 additions & 11 deletions src/arch/x86_64/paging/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,9 @@ use crate::consts::*;
/// The memory slice must be larger than [`MIN_PHYSMEM_SIZE`].
/// Also, the memory `mem` needs to be zeroed for [`PAGE_SIZE`] bytes at the
/// offsets [`BOOT_PML4`] and [`BOOT_PDPTE`], otherwise the integrity of the
/// pagetables and thus the integrity of the guest's memory is not ensured
pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) {
/// pagetables and thus the integrity of the guest's memory is not ensured.
/// `mem` and `guest_address` must be 2 MiB page aligned.
pub fn initialize_pagetables(mem: &mut [u8], guest_address: GuestPhysAddr) {
assert!(mem.len() >= MIN_PHYSMEM_SIZE);
let mem_addr = std::ptr::addr_of_mut!(mem[0]);

Expand Down Expand Up @@ -58,15 +59,15 @@ pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) {
gdt_entry[BOOT_GDT_DATA] = create_gdt_entry(0xC093, 0, 0xFFFFF);

pml4[0].set_addr(
GuestPhysAddr::new(guest_address + PDPTE_OFFSET),
guest_address + PDPTE_OFFSET,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);
pml4[511].set_addr(
GuestPhysAddr::new(guest_address + PML4_OFFSET),
guest_address + PML4_OFFSET,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);
pdpte[0].set_addr(
GuestPhysAddr::new(guest_address + PDE_OFFSET),
guest_address + PDE_OFFSET,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
);

Expand Down Expand Up @@ -95,29 +96,32 @@ mod tests {

#[test]
fn test_pagetable_initialization() {
let guest_address = 0x15000;
let guest_address = GuestPhysAddr::new(0x20_0000);

let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE];
// This will return a pagetable setup that we will check.
initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(), guest_address);
initialize_pagetables(
(&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(),
guest_address,
);

// Check PDPTE address
let addr_pdpte = u64::from_le_bytes(
let addr_pdpte = GuestPhysAddr::new(u64::from_le_bytes(
mem[(PML4_OFFSET as usize)..(PML4_OFFSET as usize + 8)]
.try_into()
.unwrap(),
);
));
assert_eq!(
addr_pdpte - guest_address,
PDPTE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
);

// Check PDE
let addr_pde = u64::from_le_bytes(
let addr_pde = GuestPhysAddr::new(u64::from_le_bytes(
mem[(PDPTE_OFFSET as usize)..(PDPTE_OFFSET as usize + 8)]
.try_into()
.unwrap(),
);
));
assert_eq!(
addr_pde - guest_address,
PDE_OFFSET | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
Expand Down
2 changes: 1 addition & 1 deletion src/linux/gdb/section_offsets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use super::GdbUhyve;

impl target::ext::section_offsets::SectionOffsets for GdbUhyve {
fn get_section_offsets(&mut self) -> Result<Offsets<u64>, Self::Error> {
let offset = self.vm.get_offset();
let offset = self.vm.kernel_start_addr().as_u64();
Ok(Offsets::Sections {
text: offset,
data: offset,
Expand Down
26 changes: 13 additions & 13 deletions src/linux/x86_64/kvm_cpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,9 @@ impl VirtualizationBackend for KvmVm {
},
};
kvcpu.init(
parent_vm.get_entry_point(),
parent_vm.entry_point(),
parent_vm.stack_address(),
parent_vm.guest_address(),
parent_vm.memory_start(),
id,
)?;

Expand Down Expand Up @@ -265,9 +265,9 @@ impl KvmCpu {

fn setup_long_mode(
&self,
entry_point: u64,
stack_address: u64,
guest_address: u64,
entry_point: GuestPhysAddr,
stack_address: GuestPhysAddr,
guest_address: GuestPhysAddr,
cpu_id: u32,
) -> Result<(), kvm_ioctls::Error> {
//debug!("Setup long mode");
Expand All @@ -280,7 +280,7 @@ impl KvmCpu {
| Cr0Flags::PAGING;
sregs.cr0 = cr0.bits();

sregs.cr3 = guest_address + PML4_OFFSET;
sregs.cr3 = guest_address.as_u64() + PML4_OFFSET;

let cr4 = Cr4Flags::PHYSICAL_ADDRESS_EXTENSION;
sregs.cr4 = cr4.bits();
Expand Down Expand Up @@ -311,17 +311,17 @@ impl KvmCpu {
sregs.ss = seg;
//sregs.fs = seg;
//sregs.gs = seg;
sregs.gdt.base = guest_address + GDT_OFFSET;
sregs.gdt.base = guest_address.as_u64() + GDT_OFFSET;
sregs.gdt.limit = ((std::mem::size_of::<u64>() * BOOT_GDT_MAX) - 1) as u16;

self.vcpu.set_sregs(&sregs)?;

let mut regs = self.vcpu.get_regs()?;
regs.rflags = 2;
regs.rip = entry_point;
regs.rdi = guest_address + BOOT_INFO_OFFSET;
regs.rip = entry_point.as_u64();
regs.rdi = guest_address.as_u64() + BOOT_INFO_OFFSET;
regs.rsi = cpu_id.into();
regs.rsp = stack_address;
regs.rsp = stack_address.as_u64();

self.vcpu.set_regs(&regs)?;

Expand All @@ -342,9 +342,9 @@ impl KvmCpu {

fn init(
&mut self,
entry_point: u64,
stack_address: u64,
guest_address: u64,
entry_point: GuestPhysAddr,
stack_address: GuestPhysAddr,
guest_address: GuestPhysAddr,
cpu_id: u32,
) -> HypervisorResult<()> {
self.setup_long_mode(entry_point, stack_address, guest_address, cpu_id)?;
Expand Down
36 changes: 19 additions & 17 deletions src/vm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,9 @@ impl Default for Output {

pub struct UhyveVm<VirtBackend: VirtualizationBackend> {
/// The starting position of the image in physical memory
offset: u64,
entry_point: u64,
stack_address: u64,
kernel_address: GuestPhysAddr,
entry_point: GuestPhysAddr,
stack_address: GuestPhysAddr,
guest_address: GuestPhysAddr,
pub mem: Arc<MmapMemory>,
path: PathBuf,
Expand Down Expand Up @@ -269,9 +269,9 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
};

let mut vm = Self {
offset,
entry_point: 0,
stack_address: 0,
kernel_address: GuestPhysAddr::new(offset),
entry_point: GuestPhysAddr::new(0),
stack_address: GuestPhysAddr::new(0),
guest_address,
mem: mem.into(),
path: kernel_path,
Expand Down Expand Up @@ -306,20 +306,20 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
}

/// Returns the section offsets relative to their base addresses
pub fn get_offset(&self) -> u64 {
self.offset
pub fn kernel_start_addr(&self) -> GuestPhysAddr {
self.kernel_address
}

pub fn get_entry_point(&self) -> u64 {
pub fn entry_point(&self) -> GuestPhysAddr {
self.entry_point
}

pub fn stack_address(&self) -> u64 {
pub fn stack_address(&self) -> GuestPhysAddr {
self.stack_address
}

pub fn guest_address(&self) -> u64 {
self.guest_address.as_u64()
pub fn memory_start(&self) -> GuestPhysAddr {
self.guest_address
}

/// Returns the number of cores for the vm.
Expand All @@ -346,7 +346,7 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call
.try_into()
.expect("Guest memory is not large enough for pagetables"),
self.mem.guest_address.as_u64(),
self.mem.guest_address,
);
}

Expand All @@ -355,7 +355,7 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
let elf = fs::read(self.kernel_path())?;
let object = KernelObject::parse(&elf).map_err(LoadKernelError::ParseKernelError)?;

let kernel_end_address = self.offset as usize + object.mem_size();
let kernel_end_address = self.kernel_address.as_u64() as usize + object.mem_size();

if kernel_end_address > self.mem.memory_size - self.mem.guest_address.as_u64() as usize {
return Err(LoadKernelError::InsufficientMemory);
Expand All @@ -368,9 +368,9 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
// Safety: Slice only lives during this fn call, so no aliasing happens
&mut unsafe { self.mem.as_slice_uninit_mut() }
[KERNEL_OFFSET as usize..object.mem_size() + KERNEL_OFFSET as usize],
self.offset,
self.kernel_address.as_u64(),
);
self.entry_point = entry_point;
self.entry_point = GuestPhysAddr::new(entry_point);

let sep = self
.args()
Expand Down Expand Up @@ -421,9 +421,11 @@ impl<VirtBackend: VirtualizationBackend> UhyveVm<VirtBackend> {
self.boot_info = raw_boot_info_ptr;
}

self.stack_address = (self.offset).checked_sub(KERNEL_STACK_SIZE).expect(
assert!(
self.kernel_address.as_u64() > KERNEL_STACK_SIZE,
"there should be enough space for the boot stack before the kernel start address",
);
self.stack_address = self.kernel_address - KERNEL_STACK_SIZE;

Ok(())
}
Expand Down

0 comments on commit eb73591

Please sign in to comment.