Diffstat (limited to 'rust/kernel/sync')
-rw-r--r-- | rust/kernel/sync/arc.rs            | 355
-rw-r--r-- | rust/kernel/sync/arc/std_vendor.rs |   2
-rw-r--r-- | rust/kernel/sync/condvar.rs        |  44
-rw-r--r-- | rust/kernel/sync/lock.rs           | 109
-rw-r--r-- | rust/kernel/sync/lock/global.rs    | 302
-rw-r--r-- | rust/kernel/sync/lock/mutex.rs     |  32
-rw-r--r-- | rust/kernel/sync/lock/spinlock.rs  |  32
-rw-r--r-- | rust/kernel/sync/locked_by.rs      |  22
-rw-r--r-- | rust/kernel/sync/poll.rs           | 121
-rw-r--r-- | rust/kernel/sync/rcu.rs            |  52
10 files changed, 941 insertions(+), 130 deletions(-)
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 7d4c4bf58388..8484c814609a 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -12,27 +12,27 @@ //! 2. It does not support weak references, which allows it to be half the size. //! 3. It saturates the reference count instead of aborting when it goes over a threshold. //! 4. It does not provide a `get_mut` method, so the ref counted object is pinned. +//! 5. The object in [`Arc`] is pinned implicitly. //! //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html use crate::{ + alloc::{AllocError, Flags, KBox}, bindings, - error::{self, Error}, - init::{self, InPlaceInit, Init, PinInit}, + init::InPlaceInit, try_init, types::{ForeignOwnable, Opaque}, }; -use alloc::boxed::Box; use core::{ - alloc::{AllocError, Layout}, + alloc::Layout, fmt, - marker::{PhantomData, Unsize}, + marker::PhantomData, mem::{ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut}, pin::Pin, ptr::NonNull, }; -use macros::pin_data; +use pin_init::{self, pin_data, InPlaceWrite, Init, PinInit}; mod std_vendor; @@ -57,7 +57,7 @@ mod std_vendor; /// } /// /// // Create a refcounted instance of `Example`. -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?; /// /// // Get a new pointer to `obj` and increment the refcount. /// let cloned = obj.clone(); @@ -96,7 +96,7 @@ mod std_vendor; /// } /// } /// -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?; /// obj.use_reference(); /// obj.take_over(); /// # Ok::<(), Error>(()) @@ -119,14 +119,24 @@ mod std_vendor; /// impl MyTrait for Example {} /// /// // `obj` has type `Arc<Example>`. -/// let obj: Arc<Example> = Arc::try_new(Example)?; +/// let obj: Arc<Example> = Arc::new(Example, GFP_KERNEL)?; /// /// // `coerced` has type `Arc<dyn MyTrait>`. /// let coerced: Arc<dyn MyTrait> = obj; /// # Ok::<(), Error>(()) /// ``` +#[repr(transparent)] +#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))] pub struct Arc<T: ?Sized> { ptr: NonNull<ArcInner<T>>, + // NB: this informs dropck that objects of type `ArcInner<T>` may be used in `<Arc<T> as + // Drop>::drop`. Note that dropck already assumes that objects of type `T` may be used in + // `<Arc<T> as Drop>::drop` and the distinction between `T` and `ArcInner<T>` is not presently + // meaningful with respect to dropck - but this may change in the future so this is left here + // out of an abundance of caution. + // + // See https://doc.rust-lang.org/nomicon/phantom-data.html#generic-parameters-and-drop-checking + // for more detail on the semantics of dropck in the presence of `PhantomData`. _p: PhantomData<ArcInner<T>>, } @@ -137,15 +147,47 @@ struct ArcInner<T: ?Sized> { data: T, } -// This is to allow [`Arc`] (and variants) to be used as the type of `self`. -impl<T: ?Sized> core::ops::Receiver for Arc<T> {} +impl<T: ?Sized> ArcInner<T> { + /// Converts a pointer to the contents of an [`Arc`] into a pointer to the [`ArcInner`]. + /// + /// # Safety + /// + /// `ptr` must have been returned by a previous call to [`Arc::into_raw`], and the `Arc` must + /// not yet have been destroyed. + unsafe fn container_of(ptr: *const T) -> NonNull<ArcInner<T>> { + let refcount_layout = Layout::new::<bindings::refcount_t>(); + // SAFETY: The caller guarantees that the pointer is valid. 
+ let val_layout = Layout::for_value(unsafe { &*ptr }); + // SAFETY: We're computing the layout of a real struct that existed when compiling this + // binary, so its layout is not so large that it can trigger arithmetic overflow. + let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 }; + + // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and + // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field. + // + // This is documented at: + // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>. + let ptr = ptr as *const ArcInner<T>; + + // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the + // pointer, since it originates from a previous call to `Arc::into_raw` on an `Arc` that is + // still valid. + let ptr = unsafe { ptr.byte_sub(val_offset) }; + + // SAFETY: The pointer can't be null since you can't have an `ArcInner<T>` value at the null + // address. + unsafe { NonNull::new_unchecked(ptr.cast_mut()) } + } +} // This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the // dynamically-sized type (DST) `U`. -impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {} +#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] +impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {} // This is to allow `Arc<U>` to be dispatched on when `Arc<T>` can be coerced into `Arc<U>`. -impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {} +#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] +impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {} // SAFETY: It is safe to send `Arc<T>` to another thread when the underlying `T` is `Sync` because // it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs @@ -160,9 +202,29 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} // the reference count reaches zero and `T` is dropped. unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} +impl<T> InPlaceInit<T> for Arc<T> { + type PinnedSelf = Self; + + #[inline] + fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E> + where + E: From<AllocError>, + { + UniqueArc::try_pin_init(init, flags).map(|u| u.into()) + } + + #[inline] + fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E> + where + E: From<AllocError>, + { + UniqueArc::try_init(init, flags).map(|u| u.into()) + } +} + impl<T> Arc<T> { /// Constructs a new reference counted instance of `T`. - pub fn try_new(contents: T) -> Result<Self, AllocError> { + pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> { // INVARIANT: The refcount is initialised to a non-zero value. let value = ArcInner { // SAFETY: There are no safety requirements for this FFI call. @@ -170,33 +232,12 @@ impl<T> Arc<T> { data: contents, }; - let inner = Box::try_new(value)?; + let inner = KBox::new(value, flags)?; + let inner = KBox::leak(inner).into(); // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new // `Arc` object. - Ok(unsafe { Self::from_inner(Box::leak(inner).into()) }) - } - - /// Use the given initializer to in-place initialize a `T`. - /// - /// If `T: !Unpin` it will not be able to move afterwards. 
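Editor's note: the hunk above replaces the fallible `try_new`/`pin_init` constructors with flag-taking equivalents (`Arc::new` plus an `InPlaceInit` impl). A minimal sketch, not part of the patch, of how the new API is typically used; `shared_counter` is a hypothetical helper and the prelude re-exports are assumed:

```rust
use kernel::new_mutex;
use kernel::prelude::*;
use kernel::sync::{Arc, Mutex};

/// Allocates a refcounted, mutex-protected counter in one step.
fn shared_counter() -> Result<Arc<Mutex<u32>>> {
    // `pin_init` comes from the `InPlaceInit` impl for `Arc<T>`: the value is
    // initialised directly inside the refcounted allocation, and the caller now
    // chooses the allocation flags explicitly.
    Arc::pin_init(new_mutex!(0, "shared_counter"), GFP_KERNEL)
}
```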
- #[inline] - pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self> - where - Error: From<E>, - { - UniqueArc::pin_init(init).map(|u| u.into()) - } - - /// Use the given initializer to in-place initialize a `T`. - /// - /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned. - #[inline] - pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self> - where - Error: From<E>, - { - UniqueArc::init(init).map(|u| u.into()) + Ok(unsafe { Self::from_inner(inner) }) } } @@ -225,6 +266,15 @@ impl<T: ?Sized> Arc<T> { unsafe { core::ptr::addr_of!((*ptr).data) } } + /// Return a raw pointer to the data in this arc. + pub fn as_ptr(this: &Self) -> *const T { + let ptr = this.ptr.as_ptr(); + + // SAFETY: As `ptr` points to a valid allocation of type `ArcInner`, + // field projection to `data`is within bounds of the allocation. + unsafe { core::ptr::addr_of!((*ptr).data) } + } + /// Recreates an [`Arc`] instance previously deconstructed via [`Arc::into_raw`]. /// /// # Safety @@ -232,27 +282,13 @@ impl<T: ?Sized> Arc<T> { /// `ptr` must have been returned by a previous call to [`Arc::into_raw`]. Additionally, it /// must not be called more than once for each previous call to [`Arc::into_raw`]. pub unsafe fn from_raw(ptr: *const T) -> Self { - let refcount_layout = Layout::new::<bindings::refcount_t>(); - // SAFETY: The caller guarantees that the pointer is valid. - let val_layout = Layout::for_value(unsafe { &*ptr }); - // SAFETY: We're computing the layout of a real struct that existed when compiling this - // binary, so its layout is not so large that it can trigger arithmetic overflow. - let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 }; - - // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and - // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field. - // - // This is documented at: - // <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>. - let ptr = ptr as *const ArcInner<T>; - - // SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the - // pointer, since it originates from a previous call to `Arc::into_raw` and is still valid. - let ptr = unsafe { ptr.byte_sub(val_offset) }; + // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an + // `Arc` that is still valid. + let ptr = unsafe { ArcInner::container_of(ptr) }; // SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the // reference count held then will be owned by the new `Arc` object. - unsafe { Self::from_inner(NonNull::new_unchecked(ptr.cast_mut())) } + unsafe { Self::from_inner(ptr) } } /// Returns an [`ArcBorrow`] from the given [`Arc`]. @@ -271,30 +307,103 @@ impl<T: ?Sized> Arc<T> { pub fn ptr_eq(this: &Self, other: &Self) -> bool { core::ptr::eq(this.ptr.as_ptr(), other.ptr.as_ptr()) } + + /// Converts this [`Arc`] into a [`UniqueArc`], or destroys it if it is not unique. + /// + /// When this destroys the `Arc`, it does so while properly avoiding races. This means that + /// this method will never call the destructor of the value. + /// + /// # Examples + /// + /// ``` + /// use kernel::sync::{Arc, UniqueArc}; + /// + /// let arc = Arc::new(42, GFP_KERNEL)?; + /// let unique_arc = arc.into_unique_or_drop(); + /// + /// // The above conversion should succeed since refcount of `arc` is 1. 
+ /// assert!(unique_arc.is_some()); + /// + /// assert_eq!(*(unique_arc.unwrap()), 42); + /// + /// # Ok::<(), Error>(()) + /// ``` + /// + /// ``` + /// use kernel::sync::{Arc, UniqueArc}; + /// + /// let arc = Arc::new(42, GFP_KERNEL)?; + /// let another = arc.clone(); + /// + /// let unique_arc = arc.into_unique_or_drop(); + /// + /// // The above conversion should fail since refcount of `arc` is >1. + /// assert!(unique_arc.is_none()); + /// + /// # Ok::<(), Error>(()) + /// ``` + pub fn into_unique_or_drop(self) -> Option<Pin<UniqueArc<T>>> { + // We will manually manage the refcount in this method, so we disable the destructor. + let me = ManuallyDrop::new(self); + // SAFETY: We own a refcount, so the pointer is still valid. + let refcount = unsafe { me.ptr.as_ref() }.refcount.get(); + + // If the refcount reaches a non-zero value, then we have destroyed this `Arc` and will + // return without further touching the `Arc`. If the refcount reaches zero, then there are + // no other arcs, and we can create a `UniqueArc`. + // + // SAFETY: We own a refcount, so the pointer is not dangling. + let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) }; + if is_zero { + // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized + // accesses to the refcount. + unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) }; + + // INVARIANT: We own the only refcount to this arc, so we may create a `UniqueArc`. We + // must pin the `UniqueArc` because the values was previously in an `Arc`, and they pin + // their values. + Some(Pin::from(UniqueArc { + inner: ManuallyDrop::into_inner(me), + })) + } else { + None + } + } } impl<T: 'static> ForeignOwnable for Arc<T> { type Borrowed<'a> = ArcBorrow<'a, T>; + type BorrowedMut<'a> = Self::Borrowed<'a>; - fn into_foreign(self) -> *const core::ffi::c_void { - ManuallyDrop::new(self).ptr.as_ptr() as _ + fn into_foreign(self) -> *mut crate::ffi::c_void { + ManuallyDrop::new(self).ptr.as_ptr().cast() } - unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> { + unsafe fn from_foreign(ptr: *mut crate::ffi::c_void) -> Self { + // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous + // call to `Self::into_foreign`. + let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) }; + // SAFETY: By the safety requirement of this function, we know that `ptr` came from - // a previous call to `Arc::into_foreign`. - let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap(); + // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and + // holds a reference count increment that is transferrable to us. + unsafe { Self::from_inner(inner) } + } + + unsafe fn borrow<'a>(ptr: *mut crate::ffi::c_void) -> ArcBorrow<'a, T> { + // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous + // call to `Self::into_foreign`. + let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) }; // SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive // for the lifetime of the returned value. unsafe { ArcBorrow::new(inner) } } - unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { - // SAFETY: By the safety requirement of this function, we know that `ptr` came from - // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and - // holds a reference count increment that is transferrable to us. 
- unsafe { Self::from_inner(NonNull::new(ptr as _).unwrap()) } + unsafe fn borrow_mut<'a>(ptr: *mut crate::ffi::c_void) -> ArcBorrow<'a, T> { + // SAFETY: The safety requirements for `borrow_mut` are a superset of the safety + // requirements for `borrow`. + unsafe { Self::borrow(ptr) } } } @@ -316,10 +425,14 @@ impl<T: ?Sized> AsRef<T> for Arc<T> { impl<T: ?Sized> Clone for Arc<T> { fn clone(&self) -> Self { + // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is + // safe to dereference it. + let refcount = unsafe { self.ptr.as_ref() }.refcount.get(); + // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero. // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is // safe to increment the refcount. - unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) }; + unsafe { bindings::refcount_inc(refcount) }; // SAFETY: We just incremented the refcount. This increment is now owned by the new `Arc`. unsafe { Self::from_inner(self.ptr) } @@ -341,8 +454,8 @@ impl<T: ?Sized> Drop for Arc<T> { if is_zero { // The count reached zero, we must free the memory. // - // SAFETY: The pointer was initialised from the result of `Box::leak`. - unsafe { drop(Box::from_raw(self.ptr.as_ptr())) }; + // SAFETY: The pointer was initialised from the result of `KBox::leak`. + unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) }; } } } @@ -387,7 +500,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> { /// e.into() /// } /// -/// let obj = Arc::try_new(Example)?; +/// let obj = Arc::new(Example, GFP_KERNEL)?; /// let cloned = do_something(obj.as_arc_borrow()); /// /// // Assert that both `obj` and `cloned` point to the same underlying object. @@ -411,21 +524,21 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> { /// } /// } /// -/// let obj = Arc::try_new(Example { a: 10, b: 20 })?; +/// let obj = Arc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?; /// obj.as_arc_borrow().use_reference(); /// # Ok::<(), Error>(()) /// ``` +#[repr(transparent)] +#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))] pub struct ArcBorrow<'a, T: ?Sized + 'a> { inner: NonNull<ArcInner<T>>, _p: PhantomData<&'a ()>, } -// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`. -impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {} - // This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into // `ArcBorrow<U>`. -impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>> +#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] +impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>> for ArcBorrow<'_, T> { } @@ -453,6 +566,27 @@ impl<T: ?Sized> ArcBorrow<'_, T> { _p: PhantomData, } } + + /// Creates an [`ArcBorrow`] to an [`Arc`] that has previously been deconstructed with + /// [`Arc::into_raw`] or [`Arc::as_ptr`]. + /// + /// # Safety + /// + /// * The provided pointer must originate from a call to [`Arc::into_raw`] or [`Arc::as_ptr`]. + /// * For the duration of the lifetime annotated on this `ArcBorrow`, the reference count must + /// not hit zero. + /// * For the duration of the lifetime annotated on this `ArcBorrow`, there must not be a + /// [`UniqueArc`] reference to this value. + pub unsafe fn from_raw(ptr: *const T) -> Self { + // SAFETY: The caller promises that this pointer originates from a call to `into_raw` on an + // `Arc` that is still valid. 
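Editor's note: a sketch, not from the patch, of how the updated `ForeignOwnable` impl is typically used to park an `Arc` refcount in a C-side `void *` and recover it later. `State` and the two helpers are hypothetical; the pointer types follow the signatures shown in the hunk above:

```rust
use core::ffi::c_void;
use kernel::sync::Arc;
use kernel::types::ForeignOwnable;

struct State {
    value: u32,
}

/// Transfers one refcount to C code as an opaque pointer.
fn to_c_cookie(state: Arc<State>) -> *mut c_void {
    state.into_foreign()
}

/// Reclaims the refcount previously handed out by `to_c_cookie`.
///
/// # Safety
///
/// `cookie` must come from `to_c_cookie` and must not be used again afterwards.
unsafe fn from_c_cookie(cookie: *mut c_void) -> Arc<State> {
    // SAFETY: Per this function's contract, `cookie` came from `into_foreign`
    // and is consumed exactly once.
    unsafe { Arc::from_foreign(cookie) }
}
```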
+ let ptr = unsafe { ArcInner::container_of(ptr) }; + + // SAFETY: The caller promises that the value remains valid since the reference count must + // not hit zero, and no mutable reference will be created since that would involve a + // `UniqueArc`. + unsafe { Self::new(ptr) } + } } impl<T: ?Sized> From<ArcBorrow<'_, T>> for Arc<T> { @@ -499,7 +633,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> { /// } /// /// fn test() -> Result<Arc<Example>> { -/// let mut x = UniqueArc::try_new(Example { a: 10, b: 20 })?; +/// let mut x = UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?; /// x.a += 1; /// x.b += 1; /// Ok(x.into()) @@ -522,7 +656,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> { /// } /// /// fn test() -> Result<Arc<Example>> { -/// let x = UniqueArc::try_new_uninit()?; +/// let x = UniqueArc::new_uninit(GFP_KERNEL)?; /// Ok(x.write(Example { a: 10, b: 20 }).into()) /// } /// @@ -542,7 +676,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> { /// } /// /// fn test() -> Result<Arc<Example>> { -/// let mut pinned = Pin::from(UniqueArc::try_new(Example { a: 10, b: 20 })?); +/// let mut pinned = Pin::from(UniqueArc::new(Example { a: 10, b: 20 }, GFP_KERNEL)?); /// // We can modify `pinned` because it is `Unpin`. /// pinned.as_mut().a += 1; /// Ok(pinned.into()) @@ -554,27 +688,72 @@ pub struct UniqueArc<T: ?Sized> { inner: Arc<T>, } +impl<T> InPlaceInit<T> for UniqueArc<T> { + type PinnedSelf = Pin<Self>; + + #[inline] + fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E> + where + E: From<AllocError>, + { + UniqueArc::new_uninit(flags)?.write_pin_init(init) + } + + #[inline] + fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E> + where + E: From<AllocError>, + { + UniqueArc::new_uninit(flags)?.write_init(init) + } +} + +impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> { + type Initialized = UniqueArc<T>; + + fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid. + unsafe { init.__init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { self.assume_init() }) + } + + fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid and will not be moved, because we pin it later. + unsafe { init.__pinned_init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { self.assume_init() }.into()) + } +} + impl<T> UniqueArc<T> { /// Tries to allocate a new [`UniqueArc`] instance. - pub fn try_new(value: T) -> Result<Self, AllocError> { + pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> { Ok(Self { // INVARIANT: The newly-created object has a refcount of 1. - inner: Arc::try_new(value)?, + inner: Arc::new(value, flags)?, }) } /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet. - pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> { + pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> { // INVARIANT: The refcount is initialised to a non-zero value. - let inner = Box::try_init::<AllocError>(try_init!(ArcInner { - // SAFETY: There are no safety requirements for this FFI call. 
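Editor's note: a sketch, not in the patch, of the two-phase pattern enabled by the `InPlaceWrite` impl above: allocate first, then run an initialiser in the already-reserved slot. `Buffer` and `build_buffer` are hypothetical, and the `init!` macro is assumed to be importable from the `pin_init` crate (its exact path depends on the tree version):

```rust
use kernel::prelude::*;
use kernel::sync::{Arc, UniqueArc};
use pin_init::init;

struct Buffer {
    len: usize,
    flags: u32,
}

/// Allocates the storage with `GFP_KERNEL`, then initialises it in place.
fn build_buffer() -> Result<Arc<Buffer>> {
    let slot = UniqueArc::<Buffer>::new_uninit(GFP_KERNEL)?;
    // `write_init` runs the initialiser directly in the allocated slot and
    // returns the now-initialised `UniqueArc<Buffer>`.
    let buf = slot.write_init(init!(Buffer { len: 0, flags: 0 }))?;
    Ok(buf.into())
}
```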
- refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), - data <- init::uninit::<T, AllocError>(), - }? AllocError))?; + let inner = KBox::try_init::<AllocError>( + try_init!(ArcInner { + // SAFETY: There are no safety requirements for this FFI call. + refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), + data <- pin_init::uninit::<T, AllocError>(), + }? AllocError), + flags, + )?; Ok(UniqueArc { // INVARIANT: The newly-created object has a refcount of 1. - // SAFETY: The pointer from the `Box` is valid. - inner: unsafe { Arc::from_inner(Box::leak(inner).into()) }, + // SAFETY: The pointer from the `KBox` is valid. + inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) }, }) } } diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs index a66a0c2831b3..11b3f4ecca5f 100644 --- a/rust/kernel/sync/arc/std_vendor.rs +++ b/rust/kernel/sync/arc/std_vendor.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Rust standard library vendored code. +//! //! The contents of this file come from the Rust standard library, hosted in //! the <https://github.com/rust-lang/rust> repository, licensed under //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details, diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs index 0c3671caffeb..caebf03f553b 100644 --- a/rust/kernel/sync/condvar.rs +++ b/rust/kernel/sync/condvar.rs @@ -7,18 +7,16 @@ use super::{lock::Backend, lock::Guard, LockClassKey}; use crate::{ - bindings, - init::PinInit, - pin_init, + ffi::{c_int, c_long}, str::CStr, - task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE}, + task::{ + MAX_SCHEDULE_TIMEOUT, TASK_FREEZABLE, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE, + }, time::Jiffies, types::Opaque, }; -use core::ffi::{c_int, c_long}; -use core::marker::PhantomPinned; -use core::ptr; -use macros::pin_data; +use core::{marker::PhantomPinned, pin::Pin, ptr}; +use pin_init::{pin_data, pin_init, PinInit}; /// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class. #[macro_export] @@ -38,7 +36,7 @@ pub use new_condvar; /// spuriously. /// /// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such -/// instances is with the [`pin_init`](crate::pin_init) and [`new_condvar`] macros. +/// instances is with the [`pin_init`](crate::pin_init!) and [`new_condvar`] macros. /// /// # Examples /// @@ -71,11 +69,11 @@ pub use new_condvar; /// } /// /// /// Allocates a new boxed `Example`. -/// fn new_example() -> Result<Pin<Box<Example>>> { -/// Box::pin_init(pin_init!(Example { +/// fn new_example() -> Result<Pin<KBox<Example>>> { +/// KBox::pin_init(pin_init!(Example { /// value <- new_mutex!(0), /// value_changed <- new_condvar!(), -/// })) +/// }), GFP_KERNEL) /// } /// ``` /// @@ -94,7 +92,6 @@ pub struct CondVar { } // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread. -#[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for CondVar {} // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads @@ -103,7 +100,7 @@ unsafe impl Sync for CondVar {} impl CondVar { /// Constructs a new condvar initialiser. 
- pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> { + pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> { pin_init!(Self { _pin: PhantomPinned, // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have @@ -161,6 +158,25 @@ impl CondVar { crate::current!().signal_pending() } + /// Releases the lock and waits for a notification in interruptible and freezable mode. + /// + /// The process is allowed to be frozen during this sleep. No lock should be held when calling + /// this function, and there is a lockdep assertion for this. Freezing a task that holds a lock + /// can trivially deadlock vs another task that needs that lock to complete before it too can + /// hit freezable. + #[must_use = "wait_interruptible_freezable returns if a signal is pending, so the caller must check the return value"] + pub fn wait_interruptible_freezable<T: ?Sized, B: Backend>( + &self, + guard: &mut Guard<'_, T, B>, + ) -> bool { + self.wait_internal( + TASK_INTERRUPTIBLE | TASK_FREEZABLE, + guard, + MAX_SCHEDULE_TIMEOUT, + ); + crate::current!().signal_pending() + } + /// Releases the lock and waits for a notification in interruptible mode. /// /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 5b5c8efe427a..e82fa5be289c 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -6,13 +6,19 @@ //! spinlocks, raw spinlocks) to be provided with minimal effort. use super::LockClassKey; -use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard}; -use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned}; -use macros::pin_data; +use crate::{ + str::CStr, + types::{NotThreadSafe, Opaque, ScopeGuard}, +}; +use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin}; +use pin_init::{pin_data, pin_init, PinInit}; pub mod mutex; pub mod spinlock; +pub(super) mod global; +pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy}; + /// The "backend" of a lock. /// /// It is the actual implementation of the lock, without the need to repeat patterns used in all @@ -46,7 +52,7 @@ pub unsafe trait Backend { /// remain valid for read indefinitely. unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ); @@ -58,6 +64,13 @@ pub unsafe trait Backend { #[must_use] unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState; + /// Tries to acquire the lock. + /// + /// # Safety + /// + /// Callers must ensure that [`Backend::init`] has been previously called. + unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>; + /// Releases the lock, giving up its ownership. /// /// # Safety @@ -75,12 +88,20 @@ pub unsafe trait Backend { // SAFETY: The safety requirements ensure that the lock is initialised. *guard_state = unsafe { Self::lock(ptr) }; } + + /// Asserts that the lock is held using lockdep. + /// + /// # Safety + /// + /// Callers must ensure that [`Backend::init`] has been previously called. + unsafe fn assert_is_held(ptr: *mut Self::State); } /// A mutual exclusion primitive. /// /// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock /// [`Backend`] specified as the generic parameter `B`. +#[repr(C)] #[pin_data] pub struct Lock<T: ?Sized, B: Backend> { /// The kernel lock object. 
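Editor's note: a sketch, not part of the patch, of a typical wait loop using the new `wait_interruptible_freezable` helper from the `CondVar` hunk above; `wait_for_nonzero` is a hypothetical function:

```rust
use kernel::sync::{CondVar, Mutex};

/// Sleeps (interruptibly and freezably) until `*value` becomes nonzero.
///
/// Returns `false` if the wait was cut short by a pending signal.
fn wait_for_nonzero(value: &Mutex<u32>, cond: &CondVar) -> bool {
    let mut guard = value.lock();
    while *guard == 0 {
        // The guard is released while sleeping and re-acquired on wakeup; the
        // task may also be frozen during the sleep.
        if cond.wait_interruptible_freezable(&mut guard) {
            // A signal is pending; give up.
            return false;
        }
    }
    true
}
```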
@@ -106,7 +127,7 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {} impl<T, B: Backend> Lock<T, B> { /// Constructs a new lock initialiser. - pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> { + pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> { pin_init!(Self { data: UnsafeCell::new(t), _pin: PhantomPinned, @@ -119,6 +140,28 @@ impl<T, B: Backend> Lock<T, B> { } } +impl<B: Backend> Lock<(), B> { + /// Constructs a [`Lock`] from a raw pointer. + /// + /// This can be useful for interacting with a lock which was initialised outside of Rust. + /// + /// # Safety + /// + /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during + /// the whole lifetime of `'a`. + /// + /// [`State`]: Backend::State + pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self { + // SAFETY: + // - By the safety contract `ptr` must point to a valid initialised instance of `B::State` + // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of + // the struct + // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to + // `B::State`. + unsafe { &*ptr.cast() } + } +} + impl<T: ?Sized, B: Backend> Lock<T, B> { /// Acquires the lock and gives the caller access to the data protected by it. pub fn lock(&self) -> Guard<'_, T, B> { @@ -128,6 +171,15 @@ impl<T: ?Sized, B: Backend> Lock<T, B> { // SAFETY: The lock was just acquired. unsafe { Guard::new(self, state) } } + + /// Tries to acquire the lock. + /// + /// Returns a guard that can be used to access the data protected by the lock if successful. + pub fn try_lock(&self) -> Option<Guard<'_, T, B>> { + // SAFETY: The constructor of the type calls `init`, so the existence of the object proves + // that `init` was called. + unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) } + } } /// A lock guard. @@ -139,20 +191,50 @@ impl<T: ?Sized, B: Backend> Lock<T, B> { pub struct Guard<'a, T: ?Sized, B: Backend> { pub(crate) lock: &'a Lock<T, B>, pub(crate) state: B::GuardState, - _not_send: PhantomData<*mut ()>, + _not_send: NotThreadSafe, } // SAFETY: `Guard` is sync when the data protected by the lock is also sync. unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {} -impl<T: ?Sized, B: Backend> Guard<'_, T, B> { +impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> { + /// Returns the lock that this guard originates from. + /// + /// # Examples + /// + /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding + /// lock is held. + /// + /// ``` + /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}}; + /// # use pin_init::stack_pin_init; + /// + /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) { + /// // Address-equal means the same lock. + /// assert!(core::ptr::eq(guard.lock_ref(), lock)); + /// } + /// + /// // Creates a new lock on the stack. + /// stack_pin_init!{ + /// let l = new_spinlock!(42) + /// } + /// + /// let g = l.lock(); + /// + /// // `g` originates from `l`. + /// assert_held(&g, &l); + /// ``` + pub fn lock_ref(&self) -> &'a Lock<T, B> { + self.lock + } + pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U { // SAFETY: The caller owns the lock, so it is safe to unlock it. unsafe { B::unlock(self.lock.state.get(), &self.state) }; - // SAFETY: The lock was just unlocked above and is being relocked now. 
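Editor's note: the new `Backend::try_lock` surfaces as `Lock::try_lock`. A brief sketch, not in the patch, of the non-blocking pattern it enables; `try_bump` is a hypothetical helper:

```rust
use kernel::sync::Mutex;

/// Bumps the counter only if the lock can be taken without blocking.
fn try_bump(counter: &Mutex<u64>) -> bool {
    // `try_lock` returns `None` instead of sleeping when the lock is contended.
    match counter.try_lock() {
        Some(mut guard) => {
            *guard += 1;
            true
        }
        None => false,
    }
}
```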
- let _relock = - ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) }); + let _relock = ScopeGuard::new(|| + // SAFETY: The lock was just unlocked above and is being relocked now. + unsafe { B::relock(self.lock.state.get(), &mut self.state) }); cb() } @@ -187,11 +269,14 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> { /// # Safety /// /// The caller must ensure that it owns the lock. - pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self { + pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self { + // SAFETY: The caller can only hold the lock if `Backend::init` has already been called. + unsafe { B::assert_is_held(lock.state.get()) }; + Self { lock, state, - _not_send: PhantomData, + _not_send: NotThreadSafe, } } } diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs new file mode 100644 index 000000000000..d65f94b5caf2 --- /dev/null +++ b/rust/kernel/sync/lock/global.rs @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Support for defining statics containing locks. + +use crate::{ + str::CStr, + sync::lock::{Backend, Guard, Lock}, + sync::{LockClassKey, LockedBy}, + types::Opaque, +}; +use core::{ + cell::UnsafeCell, + marker::{PhantomData, PhantomPinned}, + pin::Pin, +}; + +/// Trait implemented for marker types for global locks. +/// +/// See [`global_lock!`] for examples. +pub trait GlobalLockBackend { + /// The name for this global lock. + const NAME: &'static CStr; + /// Item type stored in this global lock. + type Item: 'static; + /// The backend used for this global lock. + type Backend: Backend + 'static; + /// The class for this global lock. + fn get_lock_class() -> Pin<&'static LockClassKey>; +} + +/// Type used for global locks. +/// +/// See [`global_lock!`] for examples. +pub struct GlobalLock<B: GlobalLockBackend> { + inner: Lock<B::Item, B::Backend>, +} + +impl<B: GlobalLockBackend> GlobalLock<B> { + /// Creates a global lock. + /// + /// # Safety + /// + /// * Before any other method on this lock is called, [`Self::init`] must be called. + /// * The type `B` must not be used with any other lock. + pub const unsafe fn new(data: B::Item) -> Self { + Self { + inner: Lock { + state: Opaque::uninit(), + data: UnsafeCell::new(data), + _pin: PhantomPinned, + }, + } + } + + /// Initializes a global lock. + /// + /// # Safety + /// + /// Must not be called more than once on a given lock. + pub unsafe fn init(&'static self) { + // SAFETY: The pointer to `state` is valid for the duration of this call, and both `name` + // and `key` are valid indefinitely. The `state` is pinned since we have a `'static` + // reference to `self`. + // + // We have exclusive access to the `state` since the caller of `new` promised to call + // `init` before using any other methods. As `init` can only be called once, all other + // uses of this lock must happen after this call. + unsafe { + B::Backend::init( + self.inner.state.get(), + B::NAME.as_char_ptr(), + B::get_lock_class().as_ptr(), + ) + } + } + + /// Lock this global lock. + pub fn lock(&'static self) -> GlobalGuard<B> { + GlobalGuard { + inner: self.inner.lock(), + } + } + + /// Try to lock this global lock. + pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> { + Some(GlobalGuard { + inner: self.inner.try_lock()?, + }) + } +} + +/// A guard for a [`GlobalLock`]. +/// +/// See [`global_lock!`] for examples. 
+pub struct GlobalGuard<B: GlobalLockBackend> { + inner: Guard<'static, B::Item, B::Backend>, +} + +impl<B: GlobalLockBackend> core::ops::Deref for GlobalGuard<B> { + type Target = B::Item; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +/// A version of [`LockedBy`] for a [`GlobalLock`]. +/// +/// See [`global_lock!`] for examples. +pub struct GlobalLockedBy<T: ?Sized, B: GlobalLockBackend> { + _backend: PhantomData<B>, + value: UnsafeCell<T>, +} + +// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`. +unsafe impl<T, B> Send for GlobalLockedBy<T, B> +where + T: ?Sized, + B: GlobalLockBackend, + LockedBy<T, B::Item>: Send, +{ +} + +// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`. +unsafe impl<T, B> Sync for GlobalLockedBy<T, B> +where + T: ?Sized, + B: GlobalLockBackend, + LockedBy<T, B::Item>: Sync, +{ +} + +impl<T, B: GlobalLockBackend> GlobalLockedBy<T, B> { + /// Create a new [`GlobalLockedBy`]. + /// + /// The provided value will be protected by the global lock indicated by `B`. + pub fn new(val: T) -> Self { + Self { + value: UnsafeCell::new(val), + _backend: PhantomData, + } + } +} + +impl<T: ?Sized, B: GlobalLockBackend> GlobalLockedBy<T, B> { + /// Access the value immutably. + /// + /// The caller must prove shared access to the lock. + pub fn as_ref<'a>(&'a self, _guard: &'a GlobalGuard<B>) -> &'a T { + // SAFETY: The lock is globally unique, so there can only be one guard. + unsafe { &*self.value.get() } + } + + /// Access the value mutably. + /// + /// The caller must prove shared exclusive to the lock. + pub fn as_mut<'a>(&'a self, _guard: &'a mut GlobalGuard<B>) -> &'a mut T { + // SAFETY: The lock is globally unique, so there can only be one guard. + unsafe { &mut *self.value.get() } + } + + /// Access the value mutably directly. + /// + /// The caller has exclusive access to this `GlobalLockedBy`, so they do not need to hold the + /// lock. + pub fn get_mut(&mut self) -> &mut T { + self.value.get_mut() + } +} + +/// Defines a global lock. +/// +/// The global mutex must be initialized before first use. Usually this is done by calling +/// [`GlobalLock::init`] in the module initializer. +/// +/// # Examples +/// +/// A global counter: +/// +/// ``` +/// # mod ex { +/// # use kernel::prelude::*; +/// kernel::sync::global_lock! { +/// // SAFETY: Initialized in module initializer before first use. +/// unsafe(uninit) static MY_COUNTER: Mutex<u32> = 0; +/// } +/// +/// fn increment_counter() -> u32 { +/// let mut guard = MY_COUNTER.lock(); +/// *guard += 1; +/// *guard +/// } +/// +/// impl kernel::Module for MyModule { +/// fn init(_module: &'static ThisModule) -> Result<Self> { +/// // SAFETY: Called exactly once. +/// unsafe { MY_COUNTER.init() }; +/// +/// Ok(MyModule {}) +/// } +/// } +/// # struct MyModule {} +/// # } +/// ``` +/// +/// A global mutex used to protect all instances of a given struct: +/// +/// ``` +/// # mod ex { +/// # use kernel::prelude::*; +/// use kernel::sync::{GlobalGuard, GlobalLockedBy}; +/// +/// kernel::sync::global_lock! { +/// // SAFETY: Initialized in module initializer before first use. +/// unsafe(uninit) static MY_MUTEX: Mutex<()> = (); +/// } +/// +/// /// All instances of this struct are protected by `MY_MUTEX`. 
+/// struct MyStruct { +/// my_counter: GlobalLockedBy<u32, MY_MUTEX>, +/// } +/// +/// impl MyStruct { +/// /// Increment the counter in this instance. +/// /// +/// /// The caller must hold the `MY_MUTEX` mutex. +/// fn increment(&self, guard: &mut GlobalGuard<MY_MUTEX>) -> u32 { +/// let my_counter = self.my_counter.as_mut(guard); +/// *my_counter += 1; +/// *my_counter +/// } +/// } +/// +/// impl kernel::Module for MyModule { +/// fn init(_module: &'static ThisModule) -> Result<Self> { +/// // SAFETY: Called exactly once. +/// unsafe { MY_MUTEX.init() }; +/// +/// Ok(MyModule {}) +/// } +/// } +/// # struct MyModule {} +/// # } +/// ``` +#[macro_export] +macro_rules! global_lock { + { + $(#[$meta:meta])* $pub:vis + unsafe(uninit) static $name:ident: $kind:ident<$valuety:ty> = $value:expr; + } => { + #[doc = ::core::concat!( + "Backend type used by [`", + ::core::stringify!($name), + "`](static@", + ::core::stringify!($name), + ")." + )] + #[allow(non_camel_case_types, unreachable_pub)] + $pub enum $name {} + + impl $crate::sync::lock::GlobalLockBackend for $name { + const NAME: &'static $crate::str::CStr = $crate::c_str!(::core::stringify!($name)); + type Item = $valuety; + type Backend = $crate::global_lock_inner!(backend $kind); + + fn get_lock_class() -> Pin<&'static $crate::sync::LockClassKey> { + $crate::static_lock_class!() + } + } + + $(#[$meta])* + $pub static $name: $crate::sync::lock::GlobalLock<$name> = { + // Defined here to be outside the unsafe scope. + let init: $valuety = $value; + + // SAFETY: + // * The user of this macro promises to initialize the macro before use. + // * We are only generating one static with this backend type. + unsafe { $crate::sync::lock::GlobalLock::new(init) } + }; + }; +} +pub use global_lock; + +#[doc(hidden)] +#[macro_export] +macro_rules! global_lock_inner { + (backend Mutex) => { + $crate::sync::lock::mutex::MutexBackend + }; + (backend SpinLock) => { + $crate::sync::lock::spinlock::SpinLockBackend + }; +} diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs index ef4c4634d294..581cee7ab842 100644 --- a/rust/kernel/sync/lock/mutex.rs +++ b/rust/kernel/sync/lock/mutex.rs @@ -4,8 +4,6 @@ //! //! This module allows Rust code to use the kernel's `struct mutex`. -use crate::bindings; - /// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class. /// /// It uses the name if one is given, otherwise it generates one based on the file name and line @@ -28,7 +26,7 @@ pub use new_mutex; /// Since it may block, [`Mutex`] needs to be used with care in atomic contexts. /// /// Instances of [`Mutex`] need a lock class and to be pinned. The recommended way to create such -/// instances is with the [`pin_init`](crate::pin_init) and [`new_mutex`] macros. +/// instances is with the [`pin_init`](pin_init::pin_init) and [`new_mutex`] macros. /// /// # Examples /// @@ -60,7 +58,7 @@ pub use new_mutex; /// } /// /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new())?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?; /// assert_eq!(e.c, 10); /// assert_eq!(e.d.lock().a, 20); /// assert_eq!(e.d.lock().b, 30); @@ -88,6 +86,14 @@ pub use new_mutex; /// [`struct mutex`]: srctree/include/linux/mutex.h pub type Mutex<T> = super::Lock<T, MutexBackend>; +/// A [`Guard`] acquired from locking a [`Mutex`]. +/// +/// This is simply a type alias for a [`Guard`] returned from locking a [`Mutex`]. It will unlock +/// the [`Mutex`] upon being dropped. 
+/// +/// [`Guard`]: super::Guard +pub type MutexGuard<'a, T> = super::Guard<'a, T, MutexBackend>; + /// A kernel `struct mutex` lock backend. pub struct MutexBackend; @@ -98,7 +104,7 @@ unsafe impl super::Backend for MutexBackend { unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ) { // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and @@ -117,4 +123,20 @@ unsafe impl super::Backend for MutexBackend { // caller is the owner of the mutex. unsafe { bindings::mutex_unlock(ptr) }; } + + unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> { + // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. + let result = unsafe { bindings::mutex_trylock(ptr) }; + + if result != 0 { + Some(()) + } else { + None + } + } + + unsafe fn assert_is_held(ptr: *mut Self::State) { + // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. + unsafe { bindings::mutex_assert_is_held(ptr) } + } } diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index 0b22c635634f..d7be38ccbdc7 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -4,8 +4,6 @@ //! //! This module allows Rust code to use the kernel's `spinlock_t`. -use crate::bindings; - /// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class. /// /// It uses the name if one is given, otherwise it generates one based on the file name and line @@ -26,7 +24,7 @@ pub use new_spinlock; /// unlocked, at which point another CPU will be allowed to make progress. /// /// Instances of [`SpinLock`] need a lock class and to be pinned. The recommended way to create such -/// instances is with the [`pin_init`](crate::pin_init) and [`new_spinlock`] macros. +/// instances is with the [`pin_init`](pin_init::pin_init) and [`new_spinlock`] macros. /// /// # Examples /// @@ -58,7 +56,7 @@ pub use new_spinlock; /// } /// /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new())?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?; /// assert_eq!(e.c, 10); /// assert_eq!(e.d.lock().a, 20); /// assert_eq!(e.d.lock().b, 30); @@ -89,6 +87,14 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>; /// A kernel `spinlock_t` lock backend. pub struct SpinLockBackend; +/// A [`Guard`] acquired from locking a [`SpinLock`]. +/// +/// This is simply a type alias for a [`Guard`] returned from locking a [`SpinLock`]. It will unlock +/// the [`SpinLock`] upon being dropped. +/// +/// [`Guard`]: super::Guard +pub type SpinLockGuard<'a, T> = super::Guard<'a, T, SpinLockBackend>; + // SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the // default implementation that always calls the same locking method. unsafe impl super::Backend for SpinLockBackend { @@ -97,7 +103,7 @@ unsafe impl super::Backend for SpinLockBackend { unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ) { // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and @@ -116,4 +122,20 @@ unsafe impl super::Backend for SpinLockBackend { // caller is the owner of the spinlock. 
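Editor's note: the `MutexGuard`/`SpinLockGuard` aliases added above are mainly useful for making lock requirements explicit in function signatures. A sketch, not from the patch, assuming `MutexGuard` is re-exported from `kernel::sync` (otherwise it is reachable via `kernel::sync::lock::mutex::MutexGuard`); `Stats` and the helpers are hypothetical:

```rust
use kernel::sync::{Mutex, MutexGuard};

struct Stats {
    packets: u64,
}

/// Takes an already-held lock as a `MutexGuard`, so the locking requirement is
/// visible in the signature rather than implied.
fn record_packet(stats: &mut MutexGuard<'_, Stats>) {
    stats.packets += 1;
}

fn on_packet(stats: &Mutex<Stats>) {
    let mut guard = stats.lock();
    record_packet(&mut guard);
}
```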
unsafe { bindings::spin_unlock(ptr) } } + + unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> { + // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. + let result = unsafe { bindings::spin_trylock(ptr) }; + + if result != 0 { + Some(()) + } else { + None + } + } + + unsafe fn assert_is_held(ptr: *mut Self::State) { + // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use. + unsafe { bindings::spin_assert_is_held(ptr) } + } } diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs index babc731bd5f6..61f100a45b35 100644 --- a/rust/kernel/sync/locked_by.rs +++ b/rust/kernel/sync/locked_by.rs @@ -43,7 +43,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr}; /// struct InnerDirectory { /// /// The sum of the bytes used by all files. /// bytes_used: u64, -/// _files: Vec<File>, +/// _files: KVec<File>, /// } /// /// struct Directory { @@ -55,7 +55,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr}; /// fn print_bytes_used(dir: &Directory, file: &File) { /// let guard = dir.inner.lock(); /// let inner_file = file.inner.access(&guard); -/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used); +/// pr_info!("{} {}\n", guard.bytes_used, inner_file.bytes_used); /// } /// /// /// Increments `bytes_used` for both the directory and file. @@ -83,8 +83,12 @@ pub struct LockedBy<T: ?Sized, U: ?Sized> { // SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can. unsafe impl<T: ?Sized + Send, U: ?Sized> Send for LockedBy<T, U> {} -// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the -// data it protects is `Send`. +// SAFETY: If `T` is not `Sync`, then parallel shared access to this `LockedBy` allows you to use +// `access_mut` to hand out `&mut T` on one thread at the time. The requirement that `T: Send` is +// sufficient to allow that. +// +// If `T` is `Sync`, then the `access` method also becomes available, which allows you to obtain +// several `&T` from several threads at once. However, this is okay as `T` is `Sync`. unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {} impl<T, U> LockedBy<T, U> { @@ -118,7 +122,10 @@ impl<T: ?Sized, U> LockedBy<T, U> { /// /// Panics if `owner` is different from the data protected by the lock used in /// [`new`](LockedBy::new). - pub fn access<'a>(&'a self, owner: &'a U) -> &'a T { + pub fn access<'a>(&'a self, owner: &'a U) -> &'a T + where + T: Sync, + { build_assert!( size_of::<U>() > 0, "`U` cannot be a ZST because `owner` wouldn't be unique" @@ -127,7 +134,10 @@ impl<T: ?Sized, U> LockedBy<T, U> { panic!("mismatched owners"); } - // SAFETY: `owner` is evidence that the owner is locked. + // SAFETY: `owner` is evidence that there are only shared references to the owner for the + // duration of 'a, so it's not possible to use `Self::access_mut` to obtain a mutable + // reference to the inner value that aliases with this shared reference. The type is `Sync` + // so there are no other requirements. unsafe { &*self.data.get() } } diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs new file mode 100644 index 000000000000..d7e6e59e124b --- /dev/null +++ b/rust/kernel/sync/poll.rs @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Utilities for working with `struct poll_table`. 
+ +use crate::{ + bindings, + fs::File, + prelude::*, + sync::{CondVar, LockClassKey}, + types::Opaque, +}; +use core::ops::Deref; + +/// Creates a [`PollCondVar`] initialiser with the given name and a newly-created lock class. +#[macro_export] +macro_rules! new_poll_condvar { + ($($name:literal)?) => { + $crate::sync::poll::PollCondVar::new( + $crate::optional_name!($($name)?), $crate::static_lock_class!() + ) + }; +} + +/// Wraps the kernel's `struct poll_table`. +/// +/// # Invariants +/// +/// This struct contains a valid `struct poll_table`. +/// +/// For a `struct poll_table` to be valid, its `_qproc` function must follow the safety +/// requirements of `_qproc` functions: +/// +/// * The `_qproc` function is given permission to enqueue a waiter to the provided `poll_table` +/// during the call. Once the waiter is removed and an rcu grace period has passed, it must no +/// longer access the `wait_queue_head`. +#[repr(transparent)] +pub struct PollTable(Opaque<bindings::poll_table>); + +impl PollTable { + /// Creates a reference to a [`PollTable`] from a valid pointer. + /// + /// # Safety + /// + /// The caller must ensure that for the duration of `'a`, the pointer will point at a valid poll + /// table (as defined in the type invariants). + /// + /// The caller must also ensure that the `poll_table` is only accessed via the returned + /// reference for the duration of `'a`. + pub unsafe fn from_ptr<'a>(ptr: *mut bindings::poll_table) -> &'a mut PollTable { + // SAFETY: The safety requirements guarantee the validity of the dereference, while the + // `PollTable` type being transparent makes the cast ok. + unsafe { &mut *ptr.cast() } + } + + fn get_qproc(&self) -> bindings::poll_queue_proc { + let ptr = self.0.get(); + // SAFETY: The `ptr` is valid because it originates from a reference, and the `_qproc` + // field is not modified concurrently with this call since we have an immutable reference. + unsafe { (*ptr)._qproc } + } + + /// Register this [`PollTable`] with the provided [`PollCondVar`], so that it can be notified + /// using the condition variable. + pub fn register_wait(&mut self, file: &File, cv: &PollCondVar) { + if let Some(qproc) = self.get_qproc() { + // SAFETY: The pointers to `file` and `self` need to be valid for the duration of this + // call to `qproc`, which they are because they are references. + // + // The `cv.wait_queue_head` pointer must be valid until an rcu grace period after the + // waiter is removed. The `PollCondVar` is pinned, so before `cv.wait_queue_head` can + // be destroyed, the destructor must run. That destructor first removes all waiters, + // and then waits for an rcu grace period. Therefore, `cv.wait_queue_head` is valid for + // long enough. + unsafe { qproc(file.as_ptr() as _, cv.wait_queue_head.get(), self.0.get()) }; + } + } +} + +/// A wrapper around [`CondVar`] that makes it usable with [`PollTable`]. +/// +/// [`CondVar`]: crate::sync::CondVar +#[pin_data(PinnedDrop)] +pub struct PollCondVar { + #[pin] + inner: CondVar, +} + +impl PollCondVar { + /// Constructs a new condvar initialiser. + pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> { + pin_init!(Self { + inner <- CondVar::new(name, key), + }) + } +} + +// Make the `CondVar` methods callable on `PollCondVar`. 
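Editor's note: a condensed sketch, not part of the patch, of how `PollTable::register_wait` and `PollCondVar` fit together in a driver: the poll path registers the waiter, the update path notifies it. The `Device` type and its methods are hypothetical, the prelude re-exports of `pin_data`/`pin_init!` are assumed, and a real `poll` file operation would be wired up through the file-operations abstraction:

```rust
use kernel::fs::File;
use kernel::prelude::*;
use kernel::sync::poll::{PollCondVar, PollTable};
use kernel::sync::Mutex;
use kernel::{new_mutex, new_poll_condvar};

#[pin_data]
struct Device {
    #[pin]
    ready: Mutex<bool>,
    #[pin]
    cond: PollCondVar,
}

impl Device {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            ready <- new_mutex!(false),
            cond <- new_poll_condvar!("device_ready"),
        })
    }

    /// Called from the (hypothetical) `poll` file operation.
    fn do_poll(&self, file: &File, table: &mut PollTable) -> bool {
        // Enqueue the waiter so that `notify_*` on `cond` wakes the poller up.
        table.register_wait(file, &self.cond);
        *self.ready.lock()
    }

    /// Marks the device ready and wakes up any pollers.
    fn mark_ready(&self) {
        *self.ready.lock() = true;
        // `PollCondVar` derefs to `CondVar`, so the usual notify methods apply.
        self.cond.notify_all();
    }
}
```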
+impl Deref for PollCondVar { + type Target = CondVar; + + fn deref(&self) -> &CondVar { + &self.inner + } +} + +#[pinned_drop] +impl PinnedDrop for PollCondVar { + fn drop(self: Pin<&mut Self>) { + // Clear anything registered using `register_wait`. + // + // SAFETY: The pointer points at a valid `wait_queue_head`. + unsafe { bindings::__wake_up_pollfree(self.inner.wait_queue_head.get()) }; + + // Wait for epoll items to be properly removed. + // + // SAFETY: Just an FFI call. + unsafe { bindings::synchronize_rcu() }; + } +} diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs new file mode 100644 index 000000000000..a32bef6e490b --- /dev/null +++ b/rust/kernel/sync/rcu.rs @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! RCU support. +//! +//! C header: [`include/linux/rcupdate.h`](srctree/include/linux/rcupdate.h) + +use crate::{bindings, types::NotThreadSafe}; + +/// Evidence that the RCU read side lock is held on the current thread/CPU. +/// +/// The type is explicitly not `Send` because this property is per-thread/CPU. +/// +/// # Invariants +/// +/// The RCU read side lock is actually held while instances of this guard exist. +pub struct Guard(NotThreadSafe); + +impl Guard { + /// Acquires the RCU read side lock and returns a guard. + #[inline] + pub fn new() -> Self { + // SAFETY: An FFI call with no additional requirements. + unsafe { bindings::rcu_read_lock() }; + // INVARIANT: The RCU read side lock was just acquired above. + Self(NotThreadSafe) + } + + /// Explicitly releases the RCU read side lock. + #[inline] + pub fn unlock(self) {} +} + +impl Default for Guard { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Drop for Guard { + #[inline] + fn drop(&mut self) { + // SAFETY: By the type invariants, the RCU read side is locked, so it is ok to unlock it. + unsafe { bindings::rcu_read_unlock() }; + } +} + +/// Acquires the RCU read side lock. +#[inline] +pub fn read_lock() -> Guard { + Guard::new() +} |
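Editor's note: finally, a sketch, not in the patch, of the usage pattern for the RCU guard added in rcu.rs; `with_rcu_read_lock` is a hypothetical helper:

```rust
use kernel::sync::rcu;

/// Runs `f` inside an RCU read-side critical section.
fn with_rcu_read_lock<T>(f: impl FnOnce() -> T) -> T {
    // Acquire the RCU read lock; it is held until the guard goes away.
    let guard = rcu::read_lock();
    let ret = f();
    // Dropping the guard would also unlock; `unlock` just makes the point explicit.
    guard.unlock();
    ret
}
```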