From 0d026af73d71b71302f8769ca3d99f81dd3def01 Mon Sep 17 00:00:00 2001
From: Kitlith
Date: Fri, 1 Nov 2019 19:43:37 -0700
Subject: [PATCH] Move Safety comments inside the unsafe blocks.

It's funny, I could've sworn there was already some tool for this, but
couldn't find it when I went to look for it. *shrug*
---
 kernel/src/i386/interrupt_service_routines.rs | 14 ++++++----
 kernel/src/i386/process_switch.rs             | 15 ++++++----
 kernel/src/main.rs                            |  6 ++--
 kernel/src/sync/spin_lock_irq.rs              | 28 ++++++++++++-------
 4 files changed, 40 insertions(+), 23 deletions(-)

diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs
index a14a85c02..50c20b8a2 100644
--- a/kernel/src/i386/interrupt_service_routines.rs
+++ b/kernel/src/i386/interrupt_service_routines.rs
@@ -576,8 +576,10 @@ macro_rules! generate_trap_gate_handler {
             if $interrupt_context {
                 let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst);
                 // Lets SpinLockIrq know that we are an interrupt; interrupts should not be re-enabled.
-                // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt.
-                unsafe { disable_interrupts(); }
+                unsafe {
+                    // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt.
+                    disable_interrupts();
+                }
             }
 
             if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() {
@@ -599,9 +601,11 @@ macro_rules! generate_trap_gate_handler {
 
             if $interrupt_context {
                 let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst);
-                // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function.
-                // Additionally, this is called shortly before an iret occurs, inside the asm wrapper.
-                unsafe { decrement_lock_count(); }
+                unsafe {
+                    // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function.
+                    // Additionally, this is called shortly before an iret occurs, inside the asm wrapper.
+                    decrement_lock_count();
+                }
             }
 
             // if we're returning to userspace, check we haven't been killed
diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs
index 409b4f348..c75434e28 100644
--- a/kernel/src/i386/process_switch.rs
+++ b/kernel/src/i386/process_switch.rs
@@ -349,12 +349,15 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2:
 
     const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B);
 
-    // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule.
-    // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process,
-    // however, when a new process starts this object is not present, therefore we must manually decrement
-    // the counter.)
-    // Additionally, an iret occurs later in this function, enabling interrupts.
-    unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); }
+    unsafe {
+        // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule.
+        // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process,
+        // however, when a new process starts this object is not present, therefore we must manually decrement
+        // the counter.)
+        // Additionally, an iret occurs later in this function, enabling interrupts.
+        crate::sync::spin_lock_irq::decrement_lock_count();
+    }
+
     unsafe {
         asm!("
         mov ax,0x33     // ds, es <- UData, Ring 3
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index 5bb3b7ed8..a8c6e9369 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -252,8 +252,10 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {
     init_cpu_locals(1);
     // Now that cpu_locals are present, ensure that SpinLockIRQs
     // are aware that interrupts are disabled.
-    // Safety: paired with enable_interrupts in interrupt_service_routines::init
-    unsafe { sync::spin_lock_irq::disable_interrupts(); }
+    unsafe {
+        // Safety: paired with enable_interrupts in interrupt_service_routines::init
+        sync::spin_lock_irq::disable_interrupts();
+    }
 
     info!("Enabling interrupts");
     unsafe { i386::interrupt_service_routines::init(); }
diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs
index 22d03c647..2858ae031 100644
--- a/kernel/src/sync/spin_lock_irq.rs
+++ b/kernel/src/sync/spin_lock_irq.rs
@@ -120,8 +120,10 @@ impl<T> SpinLockIRQ<T> {
 impl<T: ?Sized> SpinLockIRQ<T> {
     /// Disables interrupts and locks the mutex.
     pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
-        // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard.
-        unsafe { disable_interrupts(); }
+        unsafe {
+            // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard.
+            disable_interrupts();
+        }
 
         // TODO: Disable preemption.
         // TODO: Spin acquire
@@ -133,9 +135,11 @@ impl<T: ?Sized> SpinLockIRQ<T> {
 
     /// Disables interrupts and locks the mutex.
     pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
-        // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq,
-        // or in case a guard is not created, later in this function.
-        unsafe { disable_interrupts(); }
+        unsafe {
+            // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq,
+            // or in case a guard is not created, later in this function.
+            disable_interrupts();
+        }
 
         // TODO: Disable preemption.
         // TODO: Spin acquire
@@ -145,8 +149,10 @@ impl<T: ?Sized> SpinLockIRQ<T> {
             Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
             None => {
                 // We couldn't lock. Restore irqs and return None
-                // Safety: Paired with disable_interrupts above in the case that a guard is not created.
-                unsafe { enable_interrupts(); }
+                unsafe {
+                    // Safety: Paired with disable_interrupts above in the case that a guard is not created.
+                    enable_interrupts();
+                }
                 None
             }
         }
@@ -181,9 +187,11 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
 
         // unlock
         unsafe { ManuallyDrop::drop(&mut self.0); }
-        // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns
-        // this guard to re-enable interrupts when it is dropped.
-        unsafe { enable_interrupts(); }
+        unsafe {
+            // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns
+            // this guard to re-enable interrupts when it is dropped.
+            enable_interrupts();
+        }
 
         // TODO: Enable preempt
     }
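For reference, the convention this patch applies, shown as a minimal self-contained Rust sketch. The two interrupt helpers below are hypothetical stand-ins with empty bodies so the example compiles on its own; they are not the kernel's real API.

// Hypothetical stand-ins, empty so the sketch compiles on its own.
unsafe fn disable_interrupts() {}
unsafe fn enable_interrupts() {}

// Old style: the Safety comment sits above the unsafe block.
fn old_style() {
    // Safety: paired with the enable_interrupts call below.
    unsafe { disable_interrupts(); }
    // Safety: paired with the disable_interrupts call above.
    unsafe { enable_interrupts(); }
}

// New style (what this patch applies): the Safety comment lives inside
// the unsafe block it justifies, next to the call it covers.
fn new_style() {
    unsafe {
        // Safety: paired with the enable_interrupts call below.
        disable_interrupts();
    }
    unsafe {
        // Safety: paired with the disable_interrupts call above.
        enable_interrupts();
    }
}

fn main() {
    old_style();
    new_style();
}

Keeping the comment inside the block ties the justification to the exact unsafe call it covers, so it is less likely to drift away from that call during later edits.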