Skip to content

Commit

Permalink
cpu/gdt: fix GDT load lifetime
Browse files Browse the repository at this point in the history
When running the LGDT instruction, the address of the entries in the
structure given to the CPU must remain valid at least until the next LGDT
instruction, otherwise it will access invalid memory.

In order for GDT::load() to be sound, the entries passed to it must
have a lifetime of 'static - this makes sure that the entries will
remain valid for the rest of the execution of the SVSM.

Co-developed-by: Thomas Leroy <[email protected]>
Signed-off-by: Carlos López <[email protected]>
  • Loading branch information
00xc committed May 24, 2024
1 parent b660ff6 commit 7c2f1f3
Showing 1 changed file with 33 additions and 29 deletions.
62 changes: 33 additions & 29 deletions kernel/src/cpu/gdt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ pub struct GDT {

impl GDT {
pub const fn new() -> Self {
GDT {
Self {
entries: [
GDTEntry::null(),
GDTEntry::code_64_kernel(),
Expand All @@ -74,20 +74,33 @@ impl GDT {
}
}

pub fn base_limit(&self) -> (u64, u32) {
let gdt_entries = GDT_SIZE as usize;
let base = (self as *const GDT) as u64;
let limit = ((mem::size_of::<u64>() * gdt_entries) - 1) as u32;
(base, limit)
/// Writes the TSS descriptor into this GDT. A 64-bit TSS descriptor
/// occupies TWO consecutive 8-byte GDT slots (`desc0` is the low
/// half, `desc1` the high half).
///
/// # Safety
///
/// The caller must ensure the CPU is not relying on the descriptor
/// currently stored at the `SVSM_TSS` slots while they are rewritten.
unsafe fn set_tss_entry(&mut self, desc0: GDTEntry, desc1: GDTEntry) {
    let idx = (SVSM_TSS / 8) as usize;

    // Borrow BOTH slots the descriptor spans. The original sliced a
    // single element (`idx..idx + 1`) and then wrote at offset 1,
    // placing the second volatile store outside the borrowed region.
    let tss_entries = self.entries[idx..idx + 2].as_mut_ptr();

    // Volatile stores: the CPU may read the GDT at any time, so the
    // compiler must not elide or reorder these writes.
    tss_entries.add(0).write_volatile(desc0);
    tss_entries.add(1).write_volatile(desc1);
}

fn descriptor(&self) -> GDTDesc {
GDTDesc {
size: (GDT_SIZE * 8) - 1,
addr: VirtAddr::from(self.entries.as_ptr()),
/// Resets both TSS descriptor slots in this GDT back to null entries.
///
/// # Safety
///
/// Same contract as `set_tss_entry`: must only be called once the CPU
/// no longer needs the in-table descriptor (e.g. after LTR has latched
/// it into the task register — see `load_tss`).
unsafe fn clear_tss_entry(&mut self) {
self.set_tss_entry(GDTEntry::null(), GDTEntry::null());
}

/// Installs `tss` into this GDT and loads it into the CPU's task
/// register, then clears the GDT slots again.
pub fn load_tss(&mut self, tss: &X86Tss) {
// A 64-bit TSS descriptor is split across two consecutive entries.
let (desc0, desc1) = tss.to_gdt_entry();

unsafe {
self.set_tss_entry(desc0, desc1);
// LTR reads the descriptor at selector SVSM_TSS from the GDT and
// latches it into the task register.
asm!("ltr %ax", in("ax") SVSM_TSS, options(att_syntax));
// Clearing afterwards is safe because LTR caches the descriptor
// in the task register. NOTE(review): presumably done so the
// (now busy-marked) TSS descriptor cannot be reloaded or reused
// from the shared table — confirm against the upstream design.
self.clear_tss_entry()
}
}
}

impl ReadLockGuard<'static, GDT> {
/// Load a GDT. Its lifetime must be static so that its entries are
/// always available to the CPU.
pub fn load(&self) {
let gdt_desc = self.descriptor();
unsafe {
Expand Down Expand Up @@ -115,27 +128,18 @@ impl GDT {
}
}

unsafe fn set_tss_entry(&mut self, desc0: GDTEntry, desc1: GDTEntry) {
let idx = (SVSM_TSS / 8) as usize;

let tss_entries = &self.entries[idx..idx + 1].as_mut_ptr();

tss_entries.add(0).write_volatile(desc0);
tss_entries.add(1).write_volatile(desc1);
}

unsafe fn clear_tss_entry(&mut self) {
self.set_tss_entry(GDTEntry::null(), GDTEntry::null());
/// Builds the pseudo-descriptor (limit + base address) that LGDT
/// expects for this table.
fn descriptor(&self) -> GDTDesc {
    // x86 convention: the limit is the table size in bytes minus one.
    let size = (GDT_SIZE * 8) - 1;
    let addr = VirtAddr::from(self.entries.as_ptr());
    GDTDesc { size, addr }
}

pub fn load_tss(&mut self, tss: &X86Tss) {
let (desc0, desc1) = tss.to_gdt_entry();

unsafe {
self.set_tss_entry(desc0, desc1);
asm!("ltr %ax", in("ax") SVSM_TSS, options(att_syntax));
self.clear_tss_entry()
}
pub fn base_limit(&self) -> (u64, u32) {
let gdt_entries = GDT_SIZE as usize;
let base: *const GDT = core::ptr::from_ref(self);
let limit = ((mem::size_of::<u64>() * gdt_entries) - 1) as u32;
(base as u64, limit)
}
}

Expand Down

0 comments on commit 7c2f1f3

Please sign in to comment.