diff --git a/src/lazy.rs b/src/lazy.rs
index 5f949c7f..0b4730e6 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -1,10 +1,7 @@
 //! Helpers built around pointer-sized atomics.
 #![cfg(target_has_atomic = "ptr")]
 #![allow(dead_code)]
-use core::{
-    ffi::c_void,
-    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
-};
+use core::sync::atomic::{AtomicUsize, Ordering};
 
 // This structure represents a lazily initialized static usize value. Useful
 // when it is preferable to just rerun initialization instead of locking.
@@ -67,58 +64,3 @@ impl LazyBool {
         self.0.unsync_init(|| usize::from(init())) != 0
     }
 }
-
-// This structure represents a lazily initialized static pointer value.
-///
-/// It's intended to be used for weak linking of a C function that may
-/// or may not be present at runtime.
-///
-/// Based off of the DlsymWeak struct in libstd:
-/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
-/// except that the caller must manually cast self.ptr() to a function pointer.
-pub struct LazyPtr {
-    addr: AtomicPtr<c_void>,
-}
-
-impl LazyPtr {
-    /// A non-null pointer value which indicates we are uninitialized.
-    ///
-    /// This constant should ideally not be a valid pointer. However,
-    /// if by chance initialization function passed to the `unsync_init`
-    /// method does return UNINIT, there will not be undefined behavior.
-    /// The initialization function will just be called each time `get()`
-    /// is called. This would be inefficient, but correct.
-    const UNINIT: *mut c_void = !0usize as *mut c_void;
-
-    /// Construct new `LazyPtr` in uninitialized state.
-    pub const fn new() -> Self {
-        Self {
-            addr: AtomicPtr::new(Self::UNINIT),
-        }
-    }
-
-    // Runs the init() function at most once, returning the value of some run of
-    // init(). Multiple callers can run their init() functions in parallel.
-    // init() should always return the same value, if it succeeds.
-    pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
-        #[cold]
-        fn do_init(this: &LazyPtr, init: impl Fn() -> *mut c_void) -> *mut c_void {
-            let addr = init();
-            this.addr.store(addr, Ordering::Release);
-            addr
-        }
-
-        // Despite having only a single atomic variable (self.addr), we still
-        // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to `init` is "ordered before" any data read through
-        // the returned pointer (which occurs when the function is called).
-        // Our implementation mirrors that of the one in libstd, meaning that
-        // the use of non-Relaxed operations is probably unnecessary.
-        let val = self.addr.load(Ordering::Acquire);
-        if val != Self::UNINIT {
-            val
-        } else {
-            do_init(self, init)
-        }
-    }
-}
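For reference, the `LazyUsize`/`LazyBool` helpers that survive in lazy.rs keep the same contract: the init closure may run more than once under races, so it must be idempotent. A minimal sketch of a hypothetical in-crate caller, assuming `LazyBool::new()` and the bool-returning `unsync_init` visible in the context lines above (the probe closure and names are illustrative, not part of this patch):

// Hypothetical caller inside the crate; not part of this patch.
static RNG_AVAILABLE: LazyBool = LazyBool::new();

fn rng_available() -> bool {
    // The closure may run multiple times if callers race, so it must be
    // idempotent; LazyBool caches the result as a usize (0 or 1).
    RNG_AVAILABLE.unsync_init(|| {
        // e.g. probe the OS once for a capability
        true
    })
}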
diff --git a/src/netbsd.rs b/src/netbsd.rs
index c4ea75e4..2842617b 100644
--- a/src/netbsd.rs
+++ b/src/netbsd.rs
@@ -1,15 +1,35 @@
 //! Implementation for NetBSD
-use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error};
-use core::{ffi::c_void, mem::MaybeUninit, ptr};
+//!
+//! `getrandom(2)` was introduced in NetBSD 10. To support older versions we
+//! implement our own weak linkage to it, and provide a fallback based on the
+//! KERN_ARND sysctl.
+use crate::{util_libc::sys_fill_exact, Error};
+use core::{
+    cmp,
+    ffi::c_void,
+    mem::{self, MaybeUninit},
+    ptr,
+    sync::atomic::{AtomicPtr, Ordering},
+};
+
+unsafe extern "C" fn polyfill_using_kern_arand(
+    buf: *mut c_void,
+    buflen: libc::size_t,
+    flags: libc::c_uint,
+) -> libc::ssize_t {
+    debug_assert_eq!(flags, 0);
 
-fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
     static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND];
-    let mut len = buf.len();
+
+    // NetBSD will only return up to 256 bytes at a time, and
+    // older NetBSD kernels will fail on longer buffers.
+    let mut len = cmp::min(buflen, 256);
+
     let ret = unsafe {
         libc::sysctl(
             MIB.as_ptr(),
             MIB.len() as libc::c_uint,
-            buf.as_mut_ptr().cast::<c_void>(),
+            buf,
             &mut len,
             ptr::null(),
             0,
@@ -22,30 +42,37 @@ fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
     }
 }
 
-type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;
+type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
-// getrandom(2) was introduced in NetBSD 10.0
-static GETRANDOM: LazyPtr = LazyPtr::new();
+static GETRANDOM: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
 
-fn dlsym_getrandom() -> *mut c_void {
+#[cold]
+fn init() -> *mut c_void {
     static NAME: &[u8] = b"getrandom\0";
     let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
-    unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
+    let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
+    if ptr.is_null() {
+        // Verify `polyfill_using_kern_arand` has the right signature.
+        const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
+        ptr = POLYFILL as *mut c_void;
+    }
+    GETRANDOM.store(ptr, Ordering::Release);
+    ptr
 }
 
 pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    let fptr = GETRANDOM.unsync_init(dlsym_getrandom);
-    if !fptr.is_null() {
-        let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
-        return sys_fill_exact(dest, |buf| unsafe {
-            func(buf.as_mut_ptr().cast::<u8>(), buf.len(), 0)
-        });
-    }
-
-    // NetBSD will only return up to 256 bytes at a time, and
-    // older NetBSD kernels will fail on longer buffers.
-    for chunk in dest.chunks_mut(256) {
-        sys_fill_exact(chunk, kern_arnd)?
+    // Despite being only a single atomic variable, we still cannot always use
+    // Ordering::Relaxed, as we need to make sure a successful call to `init`
+    // is "ordered before" any data read through the returned pointer (which
+    // occurs when the function is called). Our implementation mirrors that of
+    // the one in libstd, meaning that the use of non-Relaxed operations is
+    // probably unnecessary.
+    let mut fptr = GETRANDOM.load(Ordering::Acquire);
+    if fptr.is_null() {
+        fptr = init();
     }
-    Ok(())
+    let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) };
+    sys_fill_exact(dest, |buf| unsafe {
+        fptr(buf.as_mut_ptr().cast::<c_void>(), buf.len(), 0)
+    })
 }
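The rewrite also trades LazyPtr's `UNINIT = !0` sentinel for a plain null pointer: because `init()` substitutes the KERN_ARND polyfill whenever `dlsym` fails, the value stored in `GETRANDOM` is never null, so null can unambiguously mean "uninitialized". A self-contained sketch of that pattern, with illustrative names (`WeakFn`, `CACHE`, `polyfill`) and the `dlsym` lookup stubbed out so it compiles and runs anywhere:

use core::ffi::c_void;
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

// Stand-in signature; the real code uses the getrandom(2) signature.
type WeakFn = extern "C" fn() -> i32;

extern "C" fn polyfill() -> i32 {
    0
}

static CACHE: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

#[cold]
fn init() -> *mut c_void {
    // A real implementation would try libc::dlsym here; this sketch always
    // takes the fallback branch so it stays platform-independent.
    let looked_up: *mut c_void = ptr::null_mut();
    let fptr = if looked_up.is_null() {
        // The const assignment verifies `polyfill` has the right signature.
        const POLYFILL: WeakFn = polyfill;
        POLYFILL as *mut c_void
    } else {
        looked_up
    };
    // Never stores null: a failed lookup is replaced by the polyfill, which
    // is what lets null serve as the "uninitialized" state.
    CACHE.store(fptr, Ordering::Release);
    fptr
}

fn get() -> WeakFn {
    // Acquire pairs with the Release store in `init`.
    let mut fptr = CACHE.load(Ordering::Acquire);
    if fptr.is_null() {
        fptr = init();
    }
    unsafe { core::mem::transmute::<*mut c_void, WeakFn>(fptr) }
}

fn main() {
    assert_eq!(get()(), 0);
}

Racing threads may both call `init()`, but they store the same value, matching the "rerun initialization instead of locking" policy described in lazy.rs.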