Diffstat (limited to 'rust/kernel/sync')
-rw-r--r--  rust/kernel/sync/arc.rs        | 102
-rw-r--r--  rust/kernel/sync/aref.rs       | 154
-rw-r--r--  rust/kernel/sync/completion.rs | 112
-rw-r--r--  rust/kernel/sync/condvar.rs    |   3
-rw-r--r--  rust/kernel/sync/lock.rs       |   2
-rw-r--r--  rust/kernel/sync/poll.rs       |  69
6 files changed, 388 insertions(+), 54 deletions(-)
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index c7af0aa48a0a..63a66761d0c7 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -19,12 +19,14 @@
use crate::{
alloc::{AllocError, Flags, KBox},
bindings,
+ ffi::c_void,
init::InPlaceInit,
try_init,
types::{ForeignOwnable, Opaque},
};
use core::{
alloc::Layout,
+ borrow::{Borrow, BorrowMut},
fmt,
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
@@ -140,10 +142,9 @@ pub struct Arc<T: ?Sized> {
_p: PhantomData<ArcInner<T>>,
}
-#[doc(hidden)]
#[pin_data]
#[repr(C)]
-pub struct ArcInner<T: ?Sized> {
+struct ArcInner<T: ?Sized> {
refcount: Opaque<bindings::refcount_t>,
data: T,
}
@@ -372,20 +373,22 @@ impl<T: ?Sized> Arc<T> {
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well-aligned
+// pointer to `ArcInner<T>`.
unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
- type PointedTo = ArcInner<T>;
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<ArcInner<T>>();
+
type Borrowed<'a> = ArcBorrow<'a, T>;
type BorrowedMut<'a> = Self::Borrowed<'a>;
- fn into_foreign(self) -> *mut Self::PointedTo {
- ManuallyDrop::new(self).ptr.as_ptr()
+ fn into_foreign(self) -> *mut c_void {
+ ManuallyDrop::new(self).ptr.as_ptr().cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: By the safety requirement of this function, we know that `ptr` came from
// a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
@@ -393,20 +396,20 @@ unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
unsafe { Self::from_inner(inner) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
// for the lifetime of the returned value.
unsafe { ArcBorrow::new(inner) }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements for `borrow_mut` are a superset of the safety
// requirements for `borrow`.
- unsafe { Self::borrow(ptr) }
+ unsafe { <Self as ForeignOwnable>::borrow(ptr) }
}
}
@@ -426,6 +429,31 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::Arc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Shared instance.
+/// let arc = Arc::new(1, GFP_KERNEL)?;
+/// let shared = Foo(arc.clone());
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for Arc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
impl<T: ?Sized> Clone for Arc<T> {
fn clone(&self) -> Self {
// SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
@@ -834,6 +862,56 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let unique = Foo(arc);
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for UniqueArc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: BorrowMut<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let unique = Foo(arc);
+///
+/// let mut i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&mut i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> BorrowMut<T> for UniqueArc<T> {
+ fn borrow_mut(&mut self) -> &mut T {
+ self.deref_mut()
+ }
+}
+
impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.deref(), f)
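The arc.rs hunks above replace the associated `PointedTo` type with an untyped `*mut c_void` plus a `FOREIGN_ALIGN` constant, so `ForeignOwnable` no longer exposes `ArcInner<T>` in its public interface — which is why `ArcInner` can drop `#[doc(hidden)]` and become private. A minimal sketch of the resulting round trip (the function names are illustrative, not part of the patch):

```rust
use kernel::ffi::c_void;
use kernel::sync::Arc;
use kernel::types::ForeignOwnable;

/// Hands one refcount over to C code as an opaque pointer. The pointer is
/// aligned to at least `<Arc<u32> as ForeignOwnable>::FOREIGN_ALIGN`.
fn export_to_c(data: Arc<u32>) -> *mut c_void {
    data.into_foreign()
}

/// # Safety
///
/// `ptr` must come from a previous call to `export_to_c` and must not be
/// used again afterwards.
unsafe fn import_from_c(ptr: *mut c_void) -> Arc<u32> {
    // SAFETY: Guaranteed by this function's safety requirements.
    unsafe { Arc::from_foreign(ptr) }
}
```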
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
new file mode 100644
index 000000000000..dbd77bb68617
--- /dev/null
+++ b/rust/kernel/sync/aref.rs
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Internal reference counting support.
+
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref, ptr::NonNull};
+
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own custom ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef<T>`].
+///
+/// This is usually implemented by wrappers around existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
+/// instances of a type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in memory
+/// at least until matching decrements are performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
+pub unsafe trait AlwaysRefCounted {
+ /// Increments the reference count on the object.
+ fn inc_ref(&self);
+
+ /// Decrements the reference count on the object.
+ ///
+ /// Frees the object when the count reaches zero.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there was a previous matching increment to the reference count,
+ /// and that the object is no longer used after its reference count is decremented (as it may
+ /// result in the object being freed), unless the caller owns another increment on the refcount
+ /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+ /// [`AlwaysRefCounted::dec_ref`] once).
+ unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped. It is also automatically incremented when a new instance is created via
+/// [`ARef::clone`].
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
+/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
+pub struct ARef<T: AlwaysRefCounted> {
+ ptr: NonNull<T>,
+ _p: PhantomData<T>,
+}
+
+// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
+// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
+
+// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
+// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
+// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+
+impl<T: AlwaysRefCounted> ARef<T> {
+ /// Creates a new instance of [`ARef`].
+ ///
+ /// It takes over an increment of the reference count on the underlying object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the reference count was incremented at least once, and that they
+ /// are properly relinquishing one increment. That is, if there is only one increment, callers
+ /// must not use the underlying object anymore -- it is only safe to do so via the newly
+ /// created [`ARef`].
+ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+ // INVARIANT: The safety requirements guarantee that the new instance now owns the
+ // increment on the refcount.
+ Self {
+ ptr,
+ _p: PhantomData,
+ }
+ }
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// # // SAFETY: TODO.
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
+ /// # // SAFETY: TODO.
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
+}
+
+impl<T: AlwaysRefCounted> Clone for ARef<T> {
+ fn clone(&self) -> Self {
+ self.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(self.ptr) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Deref for ARef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the object is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
+ fn from(b: &T) -> Self {
+ b.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(NonNull::from(b)) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Drop for ARef<T> {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
+ // decrement.
+ unsafe { T::dec_ref(self.ptr) };
+ }
+}
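The new aref.rs moves `AlwaysRefCounted` and `ARef` into `rust/kernel/sync`; note that the doctest above still imports them via `kernel::types`, which re-exports them. A hedged sketch of how a C-backed wrapper might implement the trait — `bindings::foo`, `foo_get`, and `foo_put` are hypothetical helpers, not real bindings:

```rust
use core::ptr::NonNull;
use kernel::bindings;
use kernel::types::{AlwaysRefCounted, Opaque};

/// Hypothetical wrapper around a refcounted C struct `foo`.
#[repr(transparent)]
struct Foo(Opaque<bindings::foo>);

// SAFETY (sketch): assumes `foo_get`/`foo_put` keep the object alive while
// the refcount is elevated and that every `foo` instance is refcounted.
unsafe impl AlwaysRefCounted for Foo {
    fn inc_ref(&self) {
        // SAFETY: The existence of `&self` proves the refcount is non-zero.
        unsafe { bindings::foo_get(self.0.get()) };
    }

    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller relinquishes one refcount; `Foo` is
        // `#[repr(transparent)]`, so the cast is sound.
        unsafe { bindings::foo_put(obj.as_ptr().cast()) };
    }
}
```

With such an impl, `ARef::<Foo>::from(&foo_ref)` takes a new reference and dropping the `ARef` releases it.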
diff --git a/rust/kernel/sync/completion.rs b/rust/kernel/sync/completion.rs
new file mode 100644
index 000000000000..c50012a940a3
--- /dev/null
+++ b/rust/kernel/sync/completion.rs
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Completion support.
+//!
+//! Reference: <https://docs.kernel.org/scheduler/completion.html>
+//!
+//! C header: [`include/linux/completion.h`](srctree/include/linux/completion.h)
+
+use crate::{bindings, prelude::*, types::Opaque};
+
+/// Synchronization primitive to signal when a certain task has been completed.
+///
+/// The [`Completion`] synchronization primitive signals when a certain task has been completed by
+/// waking up any other tasks that have been queued up to wait on it.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::{Arc, Completion};
+/// use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
+///
+/// #[pin_data]
+/// struct MyTask {
+/// #[pin]
+/// work: Work<MyTask>,
+/// #[pin]
+/// done: Completion,
+/// }
+///
+/// impl_has_work! {
+/// impl HasWork<Self> for MyTask { self.work }
+/// }
+///
+/// impl MyTask {
+/// fn new() -> Result<Arc<Self>> {
+/// let this = Arc::pin_init(pin_init!(MyTask {
+/// work <- new_work!("MyTask::work"),
+/// done <- Completion::new(),
+/// }), GFP_KERNEL)?;
+///
+/// let _ = workqueue::system().enqueue(this.clone());
+///
+/// Ok(this)
+/// }
+///
+/// fn wait_for_completion(&self) {
+/// self.done.wait_for_completion();
+///
+/// pr_info!("Completion: task complete\n");
+/// }
+/// }
+///
+/// impl WorkItem for MyTask {
+/// type Pointer = Arc<MyTask>;
+///
+/// fn run(this: Arc<MyTask>) {
+/// // process this task
+/// this.done.complete_all();
+/// }
+/// }
+///
+/// let task = MyTask::new()?;
+/// task.wait_for_completion();
+/// # Ok::<(), Error>(())
+/// ```
+#[pin_data]
+pub struct Completion {
+ #[pin]
+ inner: Opaque<bindings::completion>,
+}
+
+// SAFETY: `Completion` is safe to be sent to any task.
+unsafe impl Send for Completion {}
+
+// SAFETY: `Completion` is safe to be accessed concurrently.
+unsafe impl Sync for Completion {}
+
+impl Completion {
+ /// Creates an initializer for a new [`Completion`].
+ pub fn new() -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner <- Opaque::ffi_init(|slot: *mut bindings::completion| {
+ // SAFETY: `slot` is a valid pointer to an uninitialized `struct completion`.
+ unsafe { bindings::init_completion(slot) };
+ }),
+ })
+ }
+
+ fn as_raw(&self) -> *mut bindings::completion {
+ self.inner.get()
+ }
+
+ /// Signal all tasks waiting on this completion.
+ ///
+ /// This method wakes up all tasks waiting on this completion; after this operation the
+ /// completion is permanently done, i.e. it signals all current and future waiters.
+ pub fn complete_all(&self) {
+ // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
+ unsafe { bindings::complete_all(self.as_raw()) };
+ }
+
+ /// Wait for completion of a task.
+ ///
+ /// This method waits for the completion of a task; it is not interruptible and there is no
+ /// timeout.
+ ///
+ /// See also [`Completion::complete_all`].
+ pub fn wait_for_completion(&self) {
+ // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
+ unsafe { bindings::wait_for_completion(self.as_raw()) };
+ }
+}
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index caebf03f553b..c6ec64295c9f 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -216,6 +216,7 @@ impl CondVar {
/// This method behaves like `notify_one`, except that it hints to the scheduler that the
/// current thread is about to go to sleep, so it should schedule the target thread on the same
/// CPU.
+ #[inline]
pub fn notify_sync(&self) {
// SAFETY: `wait_queue_head` points to valid memory.
unsafe { bindings::__wake_up_sync(self.wait_queue_head.get(), TASK_NORMAL) };
@@ -225,6 +226,7 @@ impl CondVar {
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
+ #[inline]
pub fn notify_one(&self) {
self.notify(1);
}
@@ -233,6 +235,7 @@ impl CondVar {
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
+ #[inline]
pub fn notify_all(&self) {
self.notify(0);
}
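The condvar.rs hunks only add `#[inline]` to the thin `notify_*` wrappers so that cross-crate calls can collapse into the underlying `__wake_up` calls. For context, a hedged sketch of the usual pattern these methods serve, assuming the standard `CondVar`/`Mutex` pairing from the kernel crate docs:

```rust
use kernel::prelude::*;
use kernel::sync::{CondVar, Mutex};

#[pin_data]
struct Gate {
    #[pin]
    open: Mutex<bool>,
    #[pin]
    cv: CondVar,
}

fn open_gate(gate: &Gate) {
    *gate.open.lock() = true;
    // Wake a single waiter; the notification is lost if nobody is waiting.
    gate.cv.notify_one();
}

fn wait_for_gate(gate: &Gate) {
    let mut open = gate.open.lock();
    while !*open {
        gate.cv.wait(&mut open);
    }
}
```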
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index e82fa5be289c..27202beef90c 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -175,6 +175,8 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Tries to acquire the lock.
///
/// Returns a guard that can be used to access the data protected by the lock if successful.
+ // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+ #[must_use = "if unused, the lock will be immediately unlocked"]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
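A hedged sketch of the failure mode the new `#[must_use]` guards against: the guard unlocks on drop, so calling `try_lock` without consuming the result locks and immediately unlocks, which is almost certainly a bug (illustrative code, not part of the patch):

```rust
use kernel::sync::Mutex;

fn try_bump(counter: &Mutex<u32>) -> bool {
    if let Some(mut guard) = counter.try_lock() {
        *guard += 1;
        true
        // The guard is dropped here, releasing the lock.
    } else {
        // Contended: without `#[must_use]`, a bare `counter.try_lock();`
        // would compile silently while doing no useful work.
        false
    }
}
```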
diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs
index d7e6e59e124b..0ec985d560c8 100644
--- a/rust/kernel/sync/poll.rs
+++ b/rust/kernel/sync/poll.rs
@@ -9,9 +9,8 @@ use crate::{
fs::File,
prelude::*,
sync::{CondVar, LockClassKey},
- types::Opaque,
};
-use core::ops::Deref;
+use core::{marker::PhantomData, ops::Deref};
/// Creates a [`PollCondVar`] initialiser with the given name and a newly-created lock class.
#[macro_export]
@@ -23,58 +22,43 @@ macro_rules! new_poll_condvar {
};
}
-/// Wraps the kernel's `struct poll_table`.
+/// Wraps the kernel's `poll_table`.
///
/// # Invariants
///
-/// This struct contains a valid `struct poll_table`.
-///
-/// For a `struct poll_table` to be valid, its `_qproc` function must follow the safety
-/// requirements of `_qproc` functions:
-///
-/// * The `_qproc` function is given permission to enqueue a waiter to the provided `poll_table`
-/// during the call. Once the waiter is removed and an rcu grace period has passed, it must no
-/// longer access the `wait_queue_head`.
+/// The pointer must be null or reference a valid `poll_table`.
#[repr(transparent)]
-pub struct PollTable(Opaque<bindings::poll_table>);
+pub struct PollTable<'a> {
+ table: *mut bindings::poll_table,
+ _lifetime: PhantomData<&'a bindings::poll_table>,
+}
-impl PollTable {
- /// Creates a reference to a [`PollTable`] from a valid pointer.
+impl<'a> PollTable<'a> {
+ /// Creates a [`PollTable`] from a valid pointer.
///
/// # Safety
///
- /// The caller must ensure that for the duration of `'a`, the pointer will point at a valid poll
- /// table (as defined in the type invariants).
- ///
- /// The caller must also ensure that the `poll_table` is only accessed via the returned
- /// reference for the duration of `'a`.
- pub unsafe fn from_ptr<'a>(ptr: *mut bindings::poll_table) -> &'a mut PollTable {
- // SAFETY: The safety requirements guarantee the validity of the dereference, while the
- // `PollTable` type being transparent makes the cast ok.
- unsafe { &mut *ptr.cast() }
- }
-
- fn get_qproc(&self) -> bindings::poll_queue_proc {
- let ptr = self.0.get();
- // SAFETY: The `ptr` is valid because it originates from a reference, and the `_qproc`
- // field is not modified concurrently with this call since we have an immutable reference.
- unsafe { (*ptr)._qproc }
+ /// The pointer must be null or reference a valid `poll_table` for the duration of `'a`.
+ pub unsafe fn from_raw(table: *mut bindings::poll_table) -> Self {
+ // INVARIANTS: The safety requirements are the same as the struct invariants.
+ PollTable {
+ table,
+ _lifetime: PhantomData,
+ }
}
/// Register this [`PollTable`] with the provided [`PollCondVar`], so that it can be notified
/// using the condition variable.
- pub fn register_wait(&mut self, file: &File, cv: &PollCondVar) {
- if let Some(qproc) = self.get_qproc() {
- // SAFETY: The pointers to `file` and `self` need to be valid for the duration of this
- // call to `qproc`, which they are because they are references.
- //
- // The `cv.wait_queue_head` pointer must be valid until an rcu grace period after the
- // waiter is removed. The `PollCondVar` is pinned, so before `cv.wait_queue_head` can
- // be destroyed, the destructor must run. That destructor first removes all waiters,
- // and then waits for an rcu grace period. Therefore, `cv.wait_queue_head` is valid for
- // long enough.
- unsafe { qproc(file.as_ptr() as _, cv.wait_queue_head.get(), self.0.get()) };
- }
+ pub fn register_wait(&self, file: &File, cv: &PollCondVar) {
+ // SAFETY:
+ // * `file.as_ptr()` references a valid file for the duration of this call.
+ // * `self.table` is null or references a valid poll_table for the duration of this call.
+ // * Since `PollCondVar` is pinned, its destructor is guaranteed to run before the memory
+ // containing `cv.wait_queue_head` is invalidated. Since the destructor clears all
+ // waiters and then waits for an rcu grace period, it's guaranteed that
+ // `cv.wait_queue_head` remains valid for at least an rcu grace period after the removal
+ // of the last waiter.
+ unsafe { bindings::poll_wait(file.as_ptr(), cv.wait_queue_head.get(), self.table) }
}
}
@@ -107,6 +91,7 @@ impl Deref for PollCondVar {
#[pinned_drop]
impl PinnedDrop for PollCondVar {
+ #[inline]
fn drop(self: Pin<&mut Self>) {
// Clear anything registered using `register_wait`.
//