Skip to content

Commit

Permalink
netbsd: Simplify weak lookup.
Browse files Browse the repository at this point in the history
Remove LazyPtr. Avoid constructing an invalid pointer as a sentinel to
indicate that the pointer is uninitialized. Now, a null pointer means
it is uninitialized, and a non-null pointer means it is initialized.
This is less questionable from a safety perspective, and should also be
more efficient.

Reduce duplication between the "getrandom is available" case and the
fallback case.
  • Loading branch information
briansmith committed Jun 18, 2024
1 parent a8712f3 commit 1a0a475
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 80 deletions.
60 changes: 1 addition & 59 deletions src/lazy.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
//! Helpers built around pointer-sized atomics.
#![cfg(target_has_atomic = "ptr")]
#![allow(dead_code)]
use core::{
ffi::c_void,
sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
};
use core::sync::atomic::{AtomicUsize, Ordering};

// This structure represents a lazily initialized static usize value. Useful
// when it is preferable to just rerun initialization instead of locking.
Expand Down Expand Up @@ -67,58 +64,3 @@ impl LazyBool {
self.0.unsync_init(|| usize::from(init())) != 0
}
}

/// This structure represents a lazily initialized static pointer value.
///
/// It's intended to be used for weak linking of a C function that may
/// or may not be present at runtime.
///
/// Based on the DlsymWeak struct in libstd:
/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
/// except that the caller must manually cast self.ptr() to a function pointer.
pub struct LazyPtr {
    addr: AtomicPtr<c_void>,
}

impl LazyPtr {
    /// A non-null pointer value which indicates we are uninitialized.
    ///
    /// This constant should ideally not be a valid pointer. However, if by
    /// chance the initialization function passed to `unsync_init` does
    /// return `UNINIT`, there will not be undefined behavior: the
    /// initialization function will just be called again on each subsequent
    /// call. That would be inefficient, but correct.
    const UNINIT: *mut c_void = !0usize as *mut c_void;

    /// Construct a new `LazyPtr` in the uninitialized state.
    pub const fn new() -> Self {
        Self {
            addr: AtomicPtr::new(Self::UNINIT),
        }
    }

    /// Runs `init()` at most once per uninitialized observation, returning
    /// the value of some run of `init()`. Multiple callers may race and run
    /// their `init()` functions in parallel; `init()` should always return
    /// the same value, if it succeeds.
    pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
        // Kept out of line (#[cold]) so the common already-initialized
        // fast path stays small.
        #[cold]
        fn do_init(this: &LazyPtr, init: impl Fn() -> *mut c_void) -> *mut c_void {
            let addr = init();
            this.addr.store(addr, Ordering::Release);
            addr
        }

        // Despite having only a single atomic variable (self.addr), we still
        // cannot always use Ordering::Relaxed, as we need to make sure a
        // successful call to `init` is "ordered before" any data read through
        // the returned pointer (which occurs when the function is called).
        // Our implementation mirrors that of the one in libstd, meaning that
        // the use of non-Relaxed operations is probably unnecessary.
        let val = self.addr.load(Ordering::Acquire);
        if val != Self::UNINIT {
            val
        } else {
            do_init(self, init)
        }
    }
}
60 changes: 39 additions & 21 deletions src/netbsd.rs
Original file line number Diff line number Diff line change
@@ -1,15 +1,26 @@
//! Implementation for NetBSD
use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error};
use core::{ffi::c_void, mem::MaybeUninit, ptr};
use crate::{util_libc::sys_fill_exact, Error};
use core::{
ffi::c_void,
mem::MaybeUninit,
ptr,
sync::atomic::{AtomicPtr, Ordering},
};

unsafe extern "C" fn polyfill_using_kern_arand(
buf: *mut u8,
buflen: libc::size_t,
flags: libc::c_uint,
) -> libc::ssize_t {
debug_assert_eq!(flags, 0);

fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND];
let mut len = buf.len();
let mut len = buflen;
let ret = unsafe {
libc::sysctl(
MIB.as_ptr(),
MIB.len() as libc::c_uint,
buf.as_mut_ptr().cast::<c_void>(),
buf.cast::<c_void>(),
&mut len,
ptr::null(),
0,
Expand All @@ -25,27 +36,34 @@ fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;

// getrandom(2) was introduced in NetBSD 10.0
static GETRANDOM: LazyPtr = LazyPtr::new();
static GETRANDOM: AtomicPtr<GetRandomFn> = AtomicPtr::new(ptr::null_mut());

fn dlsym_getrandom() -> *mut c_void {
#[cold]
fn init() -> GetRandomFn {
static NAME: &[u8] = b"getrandom\0";
let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
let mut ptr: *mut c_void = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
if !ptr.is_null() {
unsafe { core::mem::transmute::<*mut c_void, GetRandomFn>(ptr) }
} else {
polyfill_using_kern_arand
}
GETRANDOM.store(ptr, Ordering::Release);
ptr
}

pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
let fptr = GETRANDOM.unsync_init(dlsym_getrandom);
if !fptr.is_null() {
let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
return sys_fill_exact(dest, |buf| unsafe {
func(buf.as_mut_ptr().cast::<u8>(), buf.len(), 0)
});
}

// NetBSD will only return up to 256 bytes at a time, and
// older NetBSD kernels will fail on longer buffers.
for chunk in dest.chunks_mut(256) {
sys_fill_exact(chunk, kern_arnd)?
// Despite being only a single atomic variable, we still cannot always use
// Ordering::Relaxed, as we need to make sure a successful call to `init`
// is "ordered before" any data read through the returned pointer (which
// occurs when the function is called). Our implementation mirrors that of
// the one in libstd, meaning that the use of non-Relaxed operations is
// probably unnecessary.
let mut fptr = GETRANDOM.load(Ordering::Acquire);
if fptr.is_null() {
fptr = init();
}
Ok(())
sys_fill_exact(dest, |buf| unsafe {
fptr(buf.as_mut_ptr().cast::<u8>(), buf.len(), 0)
})
}

0 comments on commit 1a0a475

Please sign in to comment.