Rename Weak to LazyPtr and move it to lazy.rs (#427)
Currently the `Weak` type is used only by the NetBSD backend, but in
the future it may be used by other backends. To simplify the code a bit
and prepare for potential use on Windows, `Weak` now accepts a "pointer
initialization function" instead of a function name, i.e. it now works
similarly to `LazyUsize` and `LazyBool`.
newpavlov authored May 22, 2024
1 parent cf65e83 commit bcbadc1
Showing 4 changed files with 75 additions and 77 deletions.
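Before the per-file diffs, here is a minimal sketch of how a backend is expected to use the new API, mirroring the netbsd.rs change further down: the caller supplies a pointer-initialization function (typically a dlsym lookup) and casts the returned pointer to the desired function type. This sketch is illustrative only and is not part of the commit; the `getentropy` symbol, the `GetEntropyFn` alias, and the `GETENTROPY`, `dlsym_getentropy`, and `fill_with_getentropy` names are made up, while `LazyPtr`, `LazyPtr::new`, and `unsync_init` come from the new src/lazy.rs.

use core::ffi::c_void;

// `LazyPtr` is the type introduced in src/lazy.rs by this commit; this file is
// assumed to be another backend module inside the same crate.
use crate::lazy::LazyPtr;

// Signature of the weakly-linked C function we want to call (illustrative).
type GetEntropyFn = unsafe extern "C" fn(*mut c_void, libc::size_t) -> libc::c_int;

// One static per weakly-linked symbol; it starts out in the UNINIT state.
static GETENTROPY: LazyPtr = LazyPtr::new();

// The "pointer initialization function": resolve the symbol at runtime.
fn dlsym_getentropy() -> *mut c_void {
    static NAME: &[u8] = b"getentropy\0";
    unsafe { libc::dlsym(libc::RTLD_DEFAULT, NAME.as_ptr() as *const libc::c_char) }
}

// Returns None if the symbol is not present on the running system.
fn fill_with_getentropy(buf: &mut [u8]) -> Option<libc::c_int> {
    // unsync_init caches the result of dlsym_getentropy; a null pointer
    // means the function is unavailable.
    let fptr = GETENTROPY.unsync_init(dlsym_getentropy);
    if fptr.is_null() {
        return None;
    }
    let func: GetEntropyFn = unsafe { core::mem::transmute(fptr) };
    Some(unsafe { func(buf.as_mut_ptr() as *mut c_void, buf.len()) })
}

Compared to the old `Weak`, the symbol lookup now lives in the caller, which is what makes the type usable with non-dlsym initialization (for example on Windows, as the commit message anticipates).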
66 changes: 60 additions & 6 deletions src/lazy.rs
@@ -1,4 +1,8 @@
-use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+#![allow(dead_code)]
+use core::{
+    ffi::c_void,
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
 
 // This structure represents a lazily initialized static usize value. Useful
 // when it is preferable to just rerun initialization instead of locking.
@@ -21,22 +25,22 @@
 pub(crate) struct LazyUsize(AtomicUsize);
 
 impl LazyUsize {
+    // The initialization is not completed.
+    const UNINIT: usize = usize::max_value();
+
     pub const fn new() -> Self {
         Self(AtomicUsize::new(Self::UNINIT))
     }
 
-    // The initialization is not completed.
-    pub const UNINIT: usize = usize::max_value();
-
     // Runs the init() function at most once, returning the value of some run of
     // init(). Multiple callers can run their init() functions in parallel.
     // init() should always return the same value, if it succeeds.
     pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
         // Relaxed ordering is fine, as we only have a single atomic variable.
-        let mut val = self.0.load(Relaxed);
+        let mut val = self.0.load(Ordering::Relaxed);
         if val == Self::UNINIT {
             val = init();
-            self.0.store(val, Relaxed);
+            self.0.store(val, Ordering::Relaxed);
         }
         val
     }
@@ -54,3 +58,53 @@ impl LazyBool {
         self.0.unsync_init(|| init() as usize) != 0
     }
 }
+
+/// This structure represents a lazily initialized static pointer value.
+///
+/// It's intended to be used for weak linking of a C function that may
+/// or may not be present at runtime.
+///
+/// Based off of the DlsymWeak struct in libstd:
+/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
+/// except that the caller must manually cast the returned pointer to a function pointer.
+pub struct LazyPtr {
+    addr: AtomicPtr<c_void>,
+}
+
+impl LazyPtr {
+    /// A non-null pointer value which indicates we are uninitialized.
+    ///
+    /// This constant should ideally not be a valid pointer. However,
+    /// if by chance the initialization function passed to the `unsync_init`
+    /// method does return UNINIT, there will not be undefined behavior.
+    /// The initialization function will just be called each time `unsync_init()`
+    /// is called. This would be inefficient, but correct.
+    const UNINIT: *mut c_void = !0usize as *mut c_void;
+
+    /// Construct a new `LazyPtr` in the uninitialized state.
+    pub const fn new() -> Self {
+        Self {
+            addr: AtomicPtr::new(Self::UNINIT),
+        }
+    }
+
+    // Runs the init() function at most once, returning the value of some run of
+    // init(). Multiple callers can run their init() functions in parallel.
+    // init() should always return the same value, if it succeeds.
+    pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
+        // Despite having only a single atomic variable (self.addr), we still
+        // cannot always use Ordering::Relaxed, as we need to make sure a
+        // successful call to `init` is "ordered before" any data read through
+        // the returned pointer (which occurs when the function is called).
+        // Our implementation mirrors that of the one in libstd, meaning that
+        // the use of non-Relaxed operations is probably unnecessary.
+        match self.addr.load(Ordering::Acquire) {
+            Self::UNINIT => {
+                let addr = init();
+                self.addr.store(addr, Ordering::Release);
+                addr
+            }
+            addr => addr,
+        }
+    }
+}
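The Acquire/Release pair in unsync_init matters when the data reached through the returned pointer is written by the init function. Below is a standalone illustration of that publish pattern, shown with a data pointer rather than a function pointer for clarity; it is not crate code, and `Config`, `CONFIG`, `get_config`, and the value 42 are invented for the example.

use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

struct Config {
    value: u64,
}

static CONFIG: AtomicPtr<Config> = AtomicPtr::new(ptr::null_mut());

fn get_config() -> &'static Config {
    // Acquire: if we observe a non-null pointer, we also observe the writes
    // made to the Config before the Release store below published it.
    let p = CONFIG.load(Ordering::Acquire);
    if !p.is_null() {
        return unsafe { &*p };
    }
    // Racing threads may each allocate and all but one allocation leaks,
    // mirroring the "rerun initialization instead of locking" approach.
    let p = Box::into_raw(Box::new(Config { value: 42 }));
    // Release: publishes the write of `value` together with the pointer.
    CONFIG.store(p, Ordering::Release);
    unsafe { &*p }
}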
1 change: 1 addition & 0 deletions src/lib.rs
@@ -306,6 +306,7 @@ cfg_if! {
         #[path = "solaris.rs"] mod imp;
     } else if #[cfg(target_os = "netbsd")] {
         mod util_libc;
+        mod lazy;
         #[path = "netbsd.rs"] mod imp;
     } else if #[cfg(target_os = "fuchsia")] {
         #[path = "fuchsia.rs"] mod imp;
21 changes: 13 additions & 8 deletions src/netbsd.rs
@@ -1,9 +1,6 @@
 //! Implementation for NetBSD
-use crate::{
-    util_libc::{sys_fill_exact, Weak},
-    Error,
-};
-use core::{mem::MaybeUninit, ptr};
+use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error};
+use core::{ffi::c_void, mem::MaybeUninit, ptr};
 
 fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
     static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND];
@@ -27,10 +24,18 @@ fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
 
 type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
+// getrandom(2) was introduced in NetBSD 10.0
+static GETRANDOM: LazyPtr = LazyPtr::new();
+
+fn dlsym_getrandom() -> *mut c_void {
+    static NAME: &[u8] = b"getrandom\0";
+    let name_ptr = NAME.as_ptr() as *const libc::c_char;
+    unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
+}
+
 pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // getrandom(2) was introduced in NetBSD 10.0
-    static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") };
-    if let Some(fptr) = GETRANDOM.ptr() {
+    let fptr = GETRANDOM.unsync_init(dlsym_getrandom);
+    if !fptr.is_null() {
         let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
         return sys_fill_exact(dest, |buf| unsafe {
             func(buf.as_mut_ptr() as *mut u8, buf.len(), 0)
64 changes: 1 addition & 63 deletions src/util_libc.rs
@@ -1,12 +1,6 @@
 #![allow(dead_code)]
 use crate::Error;
-use core::{
-    mem::MaybeUninit,
-    num::NonZeroU32,
-    ptr::NonNull,
-    sync::atomic::{fence, AtomicPtr, Ordering},
-};
-use libc::c_void;
+use core::{mem::MaybeUninit, num::NonZeroU32};
 
 cfg_if! {
     if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] {
@@ -76,62 +70,6 @@ pub fn sys_fill_exact(
     Ok(())
 }
 
-// A "weak" binding to a C function that may or may not be present at runtime.
-// Used for supporting newer OS features while still building on older systems.
-// Based off of the DlsymWeak struct in libstd:
-// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
-// except that the caller must manually cast self.ptr() to a function pointer.
-pub struct Weak {
-    name: &'static str,
-    addr: AtomicPtr<c_void>,
-}
-
-impl Weak {
-    // A non-null pointer value which indicates we are uninitialized. This
-    // constant should ideally not be a valid address of a function pointer.
-    // However, if by chance libc::dlsym does return UNINIT, there will not
-    // be undefined behavior. libc::dlsym will just be called each time ptr()
-    // is called. This would be inefficient, but correct.
-    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
-    const UNINIT: *mut c_void = 1 as *mut c_void;
-
-    // Construct a binding to a C function with a given name. This function is
-    // unsafe because `name` _must_ be null terminated.
-    pub const unsafe fn new(name: &'static str) -> Self {
-        Self {
-            name,
-            addr: AtomicPtr::new(Self::UNINIT),
-        }
-    }
-
-    // Return the address of a function if present at runtime. Otherwise,
-    // return None. Multiple callers can call ptr() concurrently. It will
-    // always return _some_ value returned by libc::dlsym. However, the
-    // dlsym function may be called multiple times.
-    pub fn ptr(&self) -> Option<NonNull<c_void>> {
-        // Despite having only a single atomic variable (self.addr), we still
-        // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to dlsym() is "ordered before" any data read through
-        // the returned pointer (which occurs when the function is called).
-        // Our implementation mirrors that of the one in libstd, meaning that
-        // the use of non-Relaxed operations is probably unnecessary.
-        match self.addr.load(Ordering::Relaxed) {
-            Self::UNINIT => {
-                let symbol = self.name.as_ptr() as *const _;
-                let addr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, symbol) };
-                // Synchronizes with the Acquire fence below
-                self.addr.store(addr, Ordering::Release);
-                NonNull::new(addr)
-            }
-            addr => {
-                let func = NonNull::new(addr)?;
-                fence(Ordering::Acquire);
-                Some(func)
-            }
-        }
-    }
-}
-
 // SAFETY: path must be null terminated, FD must be manually closed.
 pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
     debug_assert_eq!(path.as_bytes().last(), Some(&0));