From 074db8ec1e67e8e7d95dece52dc56290d0a455ae Mon Sep 17 00:00:00 2001 From: Brian Smith Date: Tue, 18 Jun 2024 13:11:12 -0700 Subject: [PATCH] netbsd: Simplify weak lookup. Remove LazyPtr. Avoid constructing an invalid pointer as a sentinel to indicate that the pointer is uninitialized. Now, a null pointer means it is uninitialized, and a non-null pointer means it is initialized. This is less questionable from a safety perspective, and should also be more efficient. Reduce duplication between the "getrandom is available" and the fallback case. --- src/lazy.rs | 60 +-------------------------------------------------- src/netbsd.rs | 60 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 40 insertions(+), 80 deletions(-) diff --git a/src/lazy.rs b/src/lazy.rs index 73cece0e..b6331816 100644 --- a/src/lazy.rs +++ b/src/lazy.rs @@ -1,10 +1,7 @@ //! Helpers built around pointer-sized atomics. #![cfg(target_has_atomic = "ptr")] #![allow(dead_code)] -use core::{ - ffi::c_void, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, -}; +use core::sync::atomic::{AtomicUsize, Ordering}; // This structure represents a lazily initialized static usize value. Useful // when it is preferable to just rerun initialization instead of locking. @@ -67,58 +64,3 @@ impl LazyBool { self.0.unsync_init(|| usize::from(init())) != 0 } } - -// This structure represents a lazily initialized static pointer value. -/// -/// It's intended to be used for weak linking of a C function that may -/// or may not be present at runtime. -/// -/// Based off of the DlsymWeak struct in libstd: -/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84 -/// except that the caller must manually cast self.ptr() to a function pointer. -pub struct LazyPtr { - addr: AtomicPtr, -} - -impl LazyPtr { - /// A non-null pointer value which indicates we are uninitialized. - /// - /// This constant should ideally not be a valid pointer. 
However, - /// if by chance initialization function passed to the `unsync_init` - /// method does return UNINIT, there will not be undefined behavior. - /// The initialization function will just be called each time `get()` - /// is called. This would be inefficient, but correct. - const UNINIT: *mut c_void = !0usize as *mut c_void; - - /// Construct new `LazyPtr` in uninitialized state. - pub const fn new() -> Self { - Self { - addr: AtomicPtr::new(Self::UNINIT), - } - } - - // Runs the init() function at most once, returning the value of some run of - // init(). Multiple callers can run their init() functions in parallel. - // init() should always return the same value, if it succeeds. - pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void { - #[cold] - fn do_init(this: &LazyPtr, init: impl Fn() -> *mut c_void) -> *mut c_void { - let addr = init(); - this.addr.store(addr, Ordering::Release); - addr - } - - // Despite having only a single atomic variable (self.addr), we still - // cannot always use Ordering::Relaxed, as we need to make sure a - // successful call to `init` is "ordered before" any data read through - // the returned pointer (which occurs when the function is called). - // Our implementation mirrors that of the one in libstd, meaning that - // the use of non-Relaxed operations is probably unnecessary. - let val = self.addr.load(Ordering::Acquire); - if val != Self::UNINIT { - val - } else { - do_init(self, init) - } - } -} diff --git a/src/netbsd.rs b/src/netbsd.rs index c4ea75e4..ffa00693 100644 --- a/src/netbsd.rs +++ b/src/netbsd.rs @@ -1,15 +1,26 @@ //! 
Implementation for NetBSD -use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error}; -use core::{ffi::c_void, mem::MaybeUninit, ptr}; +use crate::{util_libc::sys_fill_exact, Error}; +use core::{ + ffi::c_void, + mem::MaybeUninit, + ptr, + sync::atomic::{AtomicPtr, Ordering}, +}; + +unsafe extern "C" fn polyfill_using_kern_arand( + buf: *mut u8, + mut buflen: libc::size_t, + flags: libc::c_uint, +) -> libc::ssize_t { + debug_assert_eq!(flags, 0); -fn kern_arnd(buf: &mut [MaybeUninit]) -> libc::ssize_t { static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND]; - let mut len = buf.len(); + let mut len = buflen; let ret = unsafe { libc::sysctl( MIB.as_ptr(), MIB.len() as libc::c_uint, - buf.as_mut_ptr().cast::(), + buf.cast::(), &mut len, ptr::null(), 0, @@ -25,27 +36,34 @@ fn kern_arnd(buf: &mut [MaybeUninit]) -> libc::ssize_t { type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t; // getrandom(2) was introduced in NetBSD 10.0 -static GETRANDOM: LazyPtr = LazyPtr::new(); +static GETRANDOM: AtomicPtr = AtomicPtr::new(ptr::null_mut()); -fn dlsym_getrandom() -> *mut c_void { +#[cold] +fn init() -> GetRandomFn { static NAME: &[u8] = b"getrandom\0"; let name_ptr = NAME.as_ptr().cast::(); - unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) } + let ptr: *mut c_void = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }; + let r = if !ptr.is_null() { + unsafe { core::mem::transmute::<*mut c_void, GetRandomFn>(ptr) } + } else { + polyfill_using_kern_arand + }; + GETRANDOM.store(r, Ordering::Release); + r } pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - let fptr = GETRANDOM.unsync_init(dlsym_getrandom); - if !fptr.is_null() { - let func: GetRandomFn = unsafe { core::mem::transmute(fptr) }; - return sys_fill_exact(dest, |buf| unsafe { - func(buf.as_mut_ptr().cast::(), buf.len(), 0) - }); - } - - // NetBSD will only return up to 256 bytes at a time, and - // older NetBSD kernels will fail on longer 
buffers. - for chunk in dest.chunks_mut(256) { - sys_fill_exact(chunk, kern_arnd)? + // Despite being only a single atomic variable, we still cannot always use + // Ordering::Relaxed, as we need to make sure a successful call to `init` + // is "ordered before" any data read through the returned pointer (which + // occurs when the function is called). Our implementation mirrors that of + // the one in libstd, meaning that the use of non-Relaxed operations is + // probably unnecessary. + let mut fptr = GETRANDOM.load(Ordering::Acquire); + if fptr.is_null() { + fptr = init(); } - Ok(()) + sys_fill_exact(dest, |buf| unsafe { + fptr(buf.as_mut_ptr().cast::(), buf.len(), 0) + }) }