Diffstat (limited to 'rust/kernel/sync/lock.rs'):
 rust/kernel/sync/lock.rs | 168
 1 file changed, 147 insertions(+), 21 deletions(-)
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 70a785f04754..46a57d1fc309 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -6,13 +6,19 @@
//! spinlocks, raw spinlocks) to be provided with minimal effort.
use super::LockClassKey;
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
-use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
-use macros::pin_data;
+use crate::{
+ str::{CStr, CStrExt as _},
+ types::{NotThreadSafe, Opaque, ScopeGuard},
+};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+use pin_init::{pin_data, pin_init, PinInit, Wrapper};
pub mod mutex;
pub mod spinlock;
+pub(super) mod global;
+pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
+
/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
@@ -21,14 +27,21 @@ pub mod spinlock;
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
-/// is owned, that is, between calls to `lock` and `unlock`.
-/// - Implementers must also ensure that `relock` uses the same locking method as the original
-/// lock operation.
+/// is owned, that is, between calls to [`lock`] and [`unlock`].
+/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
+/// lock operation.
+///
+/// [`lock`]: Backend::lock
+/// [`unlock`]: Backend::unlock
+/// [`relock`]: Backend::relock
pub unsafe trait Backend {
/// The state required by the lock.
type State;
- /// The state required to be kept between lock and unlock.
+ /// The state required to be kept between [`lock`] and [`unlock`].
+ ///
+ /// [`lock`]: Backend::lock
+ /// [`unlock`]: Backend::unlock
type GuardState;
/// Initialises the lock.
@@ -39,7 +52,7 @@ pub unsafe trait Backend {
/// remain valid for read indefinitely.
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
);
@@ -51,6 +64,13 @@ pub unsafe trait Backend {
#[must_use]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+ /// Tries to acquire the lock.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;
+
/// Releases the lock, giving up its ownership.
///
/// # Safety
@@ -68,12 +88,20 @@ pub unsafe trait Backend {
// SAFETY: The safety requirements ensure that the lock is initialised.
*guard_state = unsafe { Self::lock(ptr) };
}
+
+ /// Asserts that the lock is held using lockdep.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn assert_is_held(ptr: *mut Self::State);
}
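To make this contract concrete, here is a minimal sketch of a `Backend` implementation, modelled on the in-tree spinlock backend; the `bindings::` helper names are borrowed from that backend and should be treated as assumptions here. The default `relock` provided by the trait is kept, which satisfies the requirement that relocking uses the same locking method.

```rust
// A sketch modelled on the in-tree `SpinLockBackend`.
pub struct SpinLockBackend;

// SAFETY: The underlying kernel spinlock provides mutual exclusion between
// `lock` and `unlock`, and the default `relock` uses the same `lock` path.
unsafe impl Backend for SpinLockBackend {
    type State = bindings::spinlock_t;
    type GuardState = ();

    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    ) {
        // SAFETY: The safety requirements ensure that `ptr` is valid for
        // writes, and `name` and `key` remain valid for read indefinitely.
        unsafe { bindings::__spin_lock_init(ptr, name, key) }
    }

    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
        // SAFETY: The safety requirements ensure that `ptr` points to an
        // initialised lock.
        unsafe { bindings::spin_lock(ptr) }
    }

    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
        // SAFETY: The safety requirements ensure that `ptr` points to an
        // initialised lock.
        if unsafe { bindings::spin_trylock(ptr) } != 0 {
            Some(())
        } else {
            None
        }
    }

    unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
        // SAFETY: The caller owns the lock, so releasing it is sound.
        unsafe { bindings::spin_unlock(ptr) }
    }

    unsafe fn assert_is_held(ptr: *mut Self::State) {
        // SAFETY: The safety requirements ensure that `ptr` points to an
        // initialised lock.
        unsafe { bindings::spin_assert_is_held(ptr) }
    }
}
```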
/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
+#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -87,6 +115,7 @@ pub struct Lock<T: ?Sized, B: Backend> {
_pin: PhantomPinned,
/// The data protected by the lock.
+ #[pin]
pub(crate) data: UnsafeCell<T>,
}
@@ -99,10 +128,13 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
impl<T, B: Backend> Lock<T, B> {
/// Constructs a new lock initialiser.
- #[allow(clippy::new_ret_no_self)]
- pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pub fn new(
+ t: impl PinInit<T>,
+ name: &'static CStr,
+ key: Pin<&'static LockClassKey>,
+ ) -> impl PinInit<Self> {
pin_init!(Self {
- data: UnsafeCell::new(t),
+ data <- UnsafeCell::pin_init(t),
_pin: PhantomPinned,
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
// static lifetimes so they live indefinitely.
@@ -113,6 +145,28 @@ impl<T, B: Backend> Lock<T, B> {
}
}
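Since `new` now takes `t: impl PinInit<T>` rather than `T` by value, the protected data can itself be initialised in place. A minimal sketch of how this composes, modelled on the mutex documentation in the same tree (`new_mutex!` generates the lock class and name automatically):

```rust
use kernel::sync::{new_mutex, Mutex};
use pin_init::{pin_data, pin_init, PinInit};

struct Inner {
    a: u32,
    b: u32,
}

#[pin_data]
struct Example {
    c: u32,
    #[pin]
    d: Mutex<Inner>,
}

impl Example {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            c: 10,
            // `new_mutex!` forwards an `impl PinInit<Inner>` to `Lock::new`,
            // so `Inner` is initialised directly in its final location.
            d <- new_mutex!(Inner { a: 20, b: 30 }),
        })
    }
}
```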
+impl<B: Backend> Lock<(), B> {
+ /// Constructs a [`Lock`] from a raw pointer.
+ ///
+ /// This can be useful for interacting with a lock which was initialised outside of Rust.
+ ///
+ /// # Safety
+ ///
+ /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
+ /// the whole lifetime of `'a`.
+ ///
+ /// [`State`]: Backend::State
+ pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
+ // SAFETY:
+ // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
+ // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
+ // the struct
+ // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
+ // `B::State`.
+ unsafe { &*ptr.cast() }
+ }
+}
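A sketch of the intended use of `from_raw`: wrapping a lock that C code owns and has already initialised. The `bindings::foo` struct and its `lock` field are hypothetical; only the `SpinLock::<()>::from_raw` call reflects the API above.

```rust
use kernel::sync::SpinLock;

// Hypothetical: a C struct `foo` with an embedded `spinlock_t` field named
// `lock` that C code initialises before handing out pointers.
unsafe fn with_foo_lock(foo: *mut bindings::foo) {
    // SAFETY: `foo` is valid per this function's (hypothetical) contract.
    let state = unsafe { core::ptr::addr_of_mut!((*foo).lock) };
    // SAFETY: The C side initialised the spinlock, and it stays valid for
    // the lifetime of the returned reference.
    let lock = unsafe { SpinLock::<()>::from_raw(state) };
    let guard = lock.lock();
    // Critical section; the lock is released when `guard` is dropped.
    drop(guard);
}
```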
+
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
pub fn lock(&self) -> Guard<'_, T, B> {
@@ -122,6 +176,17 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
// SAFETY: The lock was just acquired.
unsafe { Guard::new(self, state) }
}
+
+ /// Tries to acquire the lock.
+ ///
+ /// Returns a guard that can be used to access the data protected by the lock if successful.
+ // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+ #[must_use = "if unused, the lock will be immediately unlocked"]
+ pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
+ // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
+ // that `init` was called.
+ unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
+ }
}
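For example, a minimal sketch of a caller that must not block: it attempts the acquisition and reports failure if the lock is contended.

```rust
use kernel::sync::SpinLock;

fn try_increment(counter: &SpinLock<u32>) -> bool {
    match counter.try_lock() {
        Some(mut guard) => {
            *guard += 1;
            true
        }
        // Contended: report failure rather than blocking.
        None => false,
    }
}
```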
/// A lock guard.
@@ -133,22 +198,77 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
pub struct Guard<'a, T: ?Sized, B: Backend> {
pub(crate) lock: &'a Lock<T, B>,
pub(crate) state: B::GuardState,
- _not_send: PhantomData<*mut ()>,
+ _not_send: NotThreadSafe,
}
// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
-impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
- pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
+ /// Returns the lock that this guard originates from.
+ ///
+ /// # Examples
+ ///
+ /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding
+ /// lock is held.
+ ///
+ /// ```
+ /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}};
+ /// # use pin_init::stack_pin_init;
+ ///
+ /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
+ /// // Address-equal means the same lock.
+ /// assert!(core::ptr::eq(guard.lock_ref(), lock));
+ /// }
+ ///
+ /// // Creates a new lock on the stack.
+ /// stack_pin_init!{
+ /// let l = new_spinlock!(42)
+ /// }
+ ///
+ /// let g = l.lock();
+ ///
+ /// // `g` originates from `l`.
+ /// assert_held(&g, &l);
+ /// ```
+ pub fn lock_ref(&self) -> &'a Lock<T, B> {
+ self.lock
+ }
+
+ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
- // SAFETY: The lock was just unlocked above and is being relocked now.
- let _relock =
- ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+ let _relock = ScopeGuard::new(||
+ // SAFETY: The lock was just unlocked above and is being relocked now.
+ unsafe { B::relock(self.lock.state.get(), &mut self.state) });
- cb();
+ cb()
+ }
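`do_unlocked` is crate-internal; it is the building block for condition-variable style waiting, where the lock must be dropped while sleeping and reacquired before returning. A minimal sketch of the pattern, with `go_to_sleep` as a hypothetical stand-in for a real wait primitive:

```rust
// Hypothetical stand-in for a real wait primitive.
fn go_to_sleep() {}

fn wait_unlocked<T: ?Sized, B: Backend>(guard: &mut Guard<'_, T, B>) {
    // The lock is dropped for the duration of the closure and reacquired
    // (via `Backend::relock`) before `do_unlocked` returns, even though the
    // guard itself stays alive across the call.
    guard.do_unlocked(go_to_sleep);
}
```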
+
+ /// Returns a pinned mutable reference to the protected data.
+ ///
+ /// The guard implements [`DerefMut`] when `T: Unpin`, so for [`Unpin`]
+ /// types [`DerefMut`] should be used instead of this function.
+ ///
+ /// [`DerefMut`]: core::ops::DerefMut
+ /// [`Unpin`]: core::marker::Unpin
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::sync::{Mutex, MutexGuard};
+ /// # use core::{pin::Pin, marker::PhantomPinned};
+ /// struct Data(PhantomPinned);
+ ///
+ /// fn example(mutex: &Mutex<Data>) {
+ /// let mut data: MutexGuard<'_, Data> = mutex.lock();
+ /// let mut data: Pin<&mut Data> = data.as_mut();
+ /// }
+ /// ```
+ pub fn as_mut(&mut self) -> Pin<&mut T> {
+ // SAFETY: `self.lock.data` is structurally pinned.
+ unsafe { Pin::new_unchecked(&mut *self.lock.data.get()) }
}
}
@@ -161,7 +281,10 @@ impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
}
}
-impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
+impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
+where
+ T: Unpin,
+{
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &mut *self.lock.data.get() }
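The new `T: Unpin` bound does not affect ordinary data: anything that is `Unpin` still gets direct mutable access through the guard, and only `!Unpin` types must go through `Guard::as_mut`. A small sketch:

```rust
use kernel::sync::Mutex;

fn bump(m: &Mutex<u64>) {
    // `u64: Unpin`, so the guard implements `DerefMut` and the protected
    // value can be mutated directly.
    *m.lock() += 1;
}
```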
@@ -181,11 +304,14 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
- pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+ unsafe { B::assert_is_held(lock.state.get()) };
+
Self {
lock,
state,
- _not_send: PhantomData,
+ _not_send: NotThreadSafe,
}
}
}
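With `Guard::new` now public, abstractions outside this module can manufacture a guard for a lock they know is held, and the added `assert_is_held` call means lockdep checks that belief. A speculative sketch: a callback documented to run with the lock already acquired (the callback contract is hypothetical; `GuardState` is `()` for spinlock-style backends):

```rust
use kernel::sync::{lock::Guard, SpinLock};

// Hypothetical contract: the C side always invokes this with `lock` held.
unsafe fn event_callback(lock: &SpinLock<u32>) {
    // SAFETY: Per the (hypothetical) callback contract the lock is held by
    // the current context; `Guard::new` also asserts this via lockdep.
    let mut guard = unsafe { Guard::new(lock, ()) };
    *guard += 1;
    // This context must return with the lock still held, so the guard is
    // leaked instead of dropped (dropping would unlock).
    core::mem::forget(guard);
}
```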