From c62dc639065547441b85303d7cbc387e21dea966 Mon Sep 17 00:00:00 2001
From: Kitlith
Date: Thu, 31 Oct 2019 14:24:55 -0700
Subject: [PATCH] Add safety comments and fix clippy.

---
 kernel/src/i386/interrupt_service_routines.rs | 12 +++-
 kernel/src/i386/process_switch.rs             |  8 ++-
 kernel/src/main.rs                            |  3 +-
 kernel/src/scheduler.rs                       |  6 ++
 kernel/src/sync/spin_lock_irq.rs              | 59 ++++++++++++-------
 5 files changed, 63 insertions(+), 25 deletions(-)

diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs
index e62ea9d79..a14a85c02 100644
--- a/kernel/src/i386/interrupt_service_routines.rs
+++ b/kernel/src/i386/interrupt_service_routines.rs
@@ -28,7 +28,6 @@
 //! [syscall_interrupt_dispatcher]: self::interrupt_service_routines::syscall_interrupt_dispatcher
 
 use crate::i386::structures::idt::{PageFaultErrorCode, Idt};
-use crate::i386::instructions::interrupts::sti;
 use crate::mem::VirtualAddress;
 use crate::paging::kernel_memory::get_kernel_memory;
 use crate::i386::PrivilegeLevel;
@@ -65,6 +64,10 @@ pub fn check_thread_killed() {
     if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::TerminationPending {
         let lock = SpinLockIRQ::new(());
         loop { // in case of spurious wakeups
+            // TODO: Is it valid for interrupts to be active here?
+            // BODY: The interrupt wrappers call decrement_lock_count before calling this function,
+            // BODY: because it has the possibility to never return.
+            // BODY: Upon a spurious wake up, interrupts will end up being enabled.
             let _ = scheduler::unschedule(&lock, lock.lock());
         }
     }
@@ -572,7 +575,9 @@ macro_rules! generate_trap_gate_handler {
 
             if $interrupt_context {
                 let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst);
-                disable_interrupts();
+                // Lets SpinLockIrq know that we are in an interrupt; interrupts should not be re-enabled.
+                // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt.
+                unsafe { disable_interrupts(); }
             }
 
             if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() {
@@ -594,6 +599,8 @@
 
             if $interrupt_context {
                 let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst);
+                // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function.
+                // Additionally, this is called shortly before an iret occurs, inside the asm wrapper.
                 unsafe { decrement_lock_count(); }
             }
@@ -1137,5 +1144,6 @@ pub unsafe fn init() {
         (*idt).load();
     }
 
+    // Paired with disable_interrupts in kernel common_start.
     crate::sync::spin_lock_irq::enable_interrupts();
 }
diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs
index 763a6a347..409b4f348 100644
--- a/kernel/src/i386/process_switch.rs
+++ b/kernel/src/i386/process_switch.rs
@@ -347,7 +347,13 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2:
     const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B);
     const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43);
     const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B);
-    // We're about to enable interrupts by doing an iret.
+
+
+    // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule.
+    // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process,
+    // however, when a new process starts this object is not present, therefore we must manually decrement
+    // the counter.)
+    // Additionally, an iret occurs later in this function, enabling interrupts.
     unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); }
     unsafe {
         asm!("
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index 670ccc26b..5bb3b7ed8 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -252,7 +252,8 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {
     init_cpu_locals(1);
     // Now that cpu_locals are present, ensure that SpinLockIRQs
     // are aware that interrupts are disabled.
-    sync::spin_lock_irq::disable_interrupts();
+    // Safety: paired with enable_interrupts in interrupt_service_routines::init
+    unsafe { sync::spin_lock_irq::disable_interrupts(); }
 
     info!("Enabling interrupts");
     unsafe { i386::interrupt_service_routines::init(); }
diff --git a/kernel/src/scheduler.rs b/kernel/src/scheduler.rs
index b8c4c353e..ce3789e2f 100644
--- a/kernel/src/scheduler.rs
+++ b/kernel/src/scheduler.rs
@@ -257,6 +257,12 @@ where
 
     // TODO: Ensure the global counter is <= 1
     let interrupt_manager = SpinLockIRQ::new(());
+
+    // This is complicated: interrupt_lock is needed to disable interrupts while
+    // performing a process switch; however, it is not dropped until after a process_switch
+    // *back* to this process. Usually it is paired with the SpinLockIrq being dropped in
+    // 'process_b', but in the case of a brand new process, it is paired with a manual decrement
+    // before performing an iret to userspace.
     let mut interrupt_lock = interrupt_manager.lock();
 
     loop {
diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs
index 891367b2d..77137a050 100644
--- a/kernel/src/sync/spin_lock_irq.rs
+++ b/kernel/src/sync/spin_lock_irq.rs
@@ -18,8 +18,8 @@ use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;
 /// # Description
 ///
 /// Allows recursively disabling interrupts while keeping a sane behavior.
-/// Should only be manipulated through sync::enable_interrupts and
-/// sync::disable_interrupts.
+/// Should only be manipulated through [enable_interrupts],
+/// [disable_interrupts], and [decrement_lock_count].
 ///
 /// Used by the SpinLockIRQ to implement recursive irqsave logic.
 #[thread_local]
@@ -27,19 +27,30 @@ static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);
 
 /// Decrement the interrupt disable counter.
 ///
-/// Look at documentation for ProcessStruct::pint_disable_counter to know more.
-pub fn enable_interrupts() {
-    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
-        if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
-            unsafe { interrupts::sti() }
-        }
+/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
+///
+/// # Unsafety:
+///
+/// Should be called in pairs with [disable_interrupts] or [decrement_lock_count],
+/// otherwise the counter will get out of sync and deadlocks will likely occur.
+pub unsafe fn enable_interrupts() {
+    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
+        unsafe { interrupts::sti() }
     }
 }
 
-/// Decrement the interrupt disable counter without possibly re-enabling interrupts.
+/// Decrement the interrupt disable counter without re-enabling interrupts.
+///
+/// Used to decrement counter while keeping interrupts disabled before an iret.
+/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
+///
+/// # Unsafety:
 ///
-/// Used to decrement counter while keeping interrupts disabled inside an interrupt.
-/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
+/// Should be called in pairs with [enable_interrupts],
+/// otherwise the counter will get out of sync and deadlocks will likely occur.
+///
+/// Additionally, this should only be used when interrupts are about to be enabled anyway,
+/// such as by an iret to userspace.
 pub unsafe fn decrement_lock_count() {
     if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
         let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst);
@@ -48,12 +59,15 @@ pub unsafe fn decrement_lock_count() {
 
 /// Increment the interrupt disable counter.
 ///
-/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
-pub fn disable_interrupts() {
-    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
-        if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
-            unsafe { interrupts::cli() }
-        }
+/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
+///
+/// # Unsafety:
+///
+/// Should be called in pairs with [enable_interrupts],
+/// otherwise the counter will get out of sync and deadlocks will likely occur.
+pub unsafe fn disable_interrupts() {
+    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
+        unsafe { interrupts::cli() }
     }
 }
 
@@ -82,7 +96,7 @@ pub unsafe fn permanently_disable_interrupts() {
 /// have been dropped.
 ///
 /// Note that it is allowed to lock/unlock the locks in a different order. It uses
-/// a global counter to disable/enable interrupts. View INTERRUPT_DISABLE_COUNTER
+/// a global counter to disable/enable interrupts. View [INTERRUPT_DISABLE_COUNTER]
 /// documentation for more information.
 pub struct SpinLockIRQ<T: ?Sized> {
     /// SpinLock we wrap.
@@ -106,7 +120,7 @@ impl<T> SpinLockIRQ<T> {
 impl<T: ?Sized> SpinLockIRQ<T> {
     /// Disables interrupts and locks the mutex.
     pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
-        // Disable irqs
+        // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIRQGuard.
         unsafe { disable_interrupts(); }
 
         // TODO: Disable preemption.
@@ -119,7 +133,8 @@ impl<T: ?Sized> SpinLockIRQ<T> {
     /// Disables interrupts and locks the mutex.
     pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
-        // Disable irqs
+        // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIRQGuard,
+        // or, in case a guard is not created, later in this function.
         unsafe { disable_interrupts(); }
 
         // TODO: Disable preemption.
@@ -130,6 +145,7 @@ impl<T: ?Sized> SpinLockIRQ<T> {
             Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
             None => {
                 // We couldn't lock. Restore irqs and return None
+                // Safety: Paired with disable_interrupts above in the case that a guard is not created.
                 unsafe { enable_interrupts(); }
                 None
             }
@@ -165,7 +181,8 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
         // unlock
         unsafe { ManuallyDrop::drop(&mut self.0); }
 
-        // Restore irq
+        // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which return
+        // this guard to re-enable interrupts when it is dropped.
         unsafe { enable_interrupts(); }
 
         // TODO: Enable preempt
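The comments added by this patch all document one invariant: every call to disable_interrupts must be balanced by exactly one enable_interrupts or decrement_lock_count, and cli/sti are only executed on the 0 -> 1 and 1 -> 0 transitions of the per-CPU counter. The sketch below is a minimal standalone model of that behaviour in plain Rust, not the kernel's actual code: cli/sti are stubbed with prints, the counter is a plain static instead of #[thread_local], the INTERRUPT_DISARM and cpu-locals checks are omitted, and IrqGuard is a hypothetical stand-in for SpinLockIRQGuard.

use std::sync::atomic::{AtomicU8, Ordering};

// Model of INTERRUPT_DISABLE_COUNTER (in the kernel this is #[thread_local]).
static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);

// Stubs standing in for the real cli/sti instructions.
fn cli() { println!("cli: interrupts masked"); }
fn sti() { println!("sti: interrupts unmasked"); }

/// Increment the counter; mask interrupts only on the 0 -> 1 transition.
/// Must be paired with exactly one enable_interrupts (or decrement_lock_count).
unsafe fn disable_interrupts() {
    if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
        cli();
    }
}

/// Decrement the counter; unmask interrupts only on the 1 -> 0 transition.
unsafe fn enable_interrupts() {
    if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
        sti();
    }
}

/// Hypothetical stand-in for SpinLockIRQGuard: locking disables, dropping re-enables.
struct IrqGuard;

impl IrqGuard {
    fn lock() -> IrqGuard {
        // Paired with the enable_interrupts in the Drop impl below.
        unsafe { disable_interrupts(); }
        IrqGuard
    }
}

impl Drop for IrqGuard {
    fn drop(&mut self) {
        // Paired with the disable_interrupts in IrqGuard::lock above.
        unsafe { enable_interrupts(); }
    }
}

fn main() {
    let outer = IrqGuard::lock();      // 0 -> 1: prints "cli"
    {
        let _inner = IrqGuard::lock(); // 1 -> 2: already masked, no cli
    }                                  // 2 -> 1: still masked, no sti
    drop(outer);                       // 1 -> 0: prints "sti"
}

Running it prints cli once for the outer guard and sti once when that guard is dropped; the nested guard only moves the counter between 1 and 2, which is the recursive irqsave behaviour the SpinLockIRQ comments describe.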