Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile | 8
-rw-r--r--  rust/bindings/lib.rs | 3
-rw-r--r--  rust/helpers/bug.c | 5
-rw-r--r--  rust/helpers/helpers.c | 5
-rw-r--r--  rust/helpers/time.c | 35
-rw-r--r--  rust/kernel/.gitignore | 2
-rw-r--r--  rust/kernel/alloc/allocator_test.rs | 2
-rw-r--r--  rust/kernel/alloc/kbox.rs | 98
-rw-r--r--  rust/kernel/alloc/kvec.rs | 59
-rw-r--r--  rust/kernel/bits.rs | 203
-rw-r--r--  rust/kernel/block/mq.rs | 2
-rw-r--r--  rust/kernel/block/mq/operations.rs | 2
-rw-r--r--  rust/kernel/block/mq/request.rs | 11
-rw-r--r--  rust/kernel/bug.rs | 126
-rw-r--r--  rust/kernel/clk.rs | 6
-rw-r--r--  rust/kernel/configfs.rs | 30
-rw-r--r--  rust/kernel/cpufreq.rs | 10
-rw-r--r--  rust/kernel/cpumask.rs | 4
-rw-r--r--  rust/kernel/device.rs | 4
-rw-r--r--  rust/kernel/device_id.rs | 4
-rw-r--r--  rust/kernel/devres.rs | 10
-rw-r--r--  rust/kernel/dma.rs | 10
-rw-r--r--  rust/kernel/drm/device.rs | 10
-rw-r--r--  rust/kernel/drm/gem/mod.rs | 4
-rw-r--r--  rust/kernel/error.rs | 10
-rw-r--r--  rust/kernel/firmware.rs | 9
-rw-r--r--  rust/kernel/fmt.rs | 7
-rw-r--r--  rust/kernel/fs/file.rs | 2
-rw-r--r--  rust/kernel/generated_arch_reachable_asm.rs.S | 7
-rw-r--r--  rust/kernel/generated_arch_warn_asm.rs.S | 7
-rw-r--r--  rust/kernel/init.rs | 34
-rw-r--r--  rust/kernel/io.rs | 20
-rw-r--r--  rust/kernel/kunit.rs | 13
-rw-r--r--  rust/kernel/lib.rs | 10
-rw-r--r--  rust/kernel/list.rs | 63
-rw-r--r--  rust/kernel/list/impl_list_item_mod.rs | 239
-rw-r--r--  rust/kernel/miscdevice.rs | 12
-rw-r--r--  rust/kernel/mm/virt.rs | 52
-rw-r--r--  rust/kernel/net/phy.rs | 4
-rw-r--r--  rust/kernel/of.rs | 6
-rw-r--r--  rust/kernel/opp.rs | 20
-rw-r--r--  rust/kernel/pci.rs | 13
-rw-r--r--  rust/kernel/platform.rs | 2
-rw-r--r--  rust/kernel/prelude.rs | 4
-rw-r--r--  rust/kernel/print.rs | 12
-rw-r--r--  rust/kernel/rbtree.rs | 29
-rw-r--r--  rust/kernel/revocable.rs | 4
-rw-r--r--  rust/kernel/seq_file.rs | 2
-rw-r--r--  rust/kernel/str.rs | 111
-rw-r--r--  rust/kernel/sync.rs | 10
-rw-r--r--  rust/kernel/sync/arc.rs | 102
-rw-r--r--  rust/kernel/sync/aref.rs | 154
-rw-r--r--  rust/kernel/time.rs | 233
-rw-r--r--  rust/kernel/time/delay.rs | 49
-rw-r--r--  rust/kernel/time/hrtimer.rs | 304
-rw-r--r--  rust/kernel/time/hrtimer/arc.rs | 8
-rw-r--r--  rust/kernel/time/hrtimer/pin.rs | 10
-rw-r--r--  rust/kernel/time/hrtimer/pin_mut.rs | 10
-rw-r--r--  rust/kernel/time/hrtimer/tbox.rs | 8
-rw-r--r--  rust/kernel/types.rs | 229
-rw-r--r--  rust/kernel/uaccess.rs | 167
-rw-r--r--  rust/kernel/workqueue.rs | 342
-rw-r--r--  rust/kernel/xarray.rs | 9
-rw-r--r--  rust/macros/module.rs | 6
-rw-r--r--  rust/pin-init/README.md | 2
-rw-r--r--  rust/pin-init/examples/big_struct_in_place.rs | 28
-rw-r--r--  rust/pin-init/examples/linked_list.rs | 10
-rw-r--r--  rust/pin-init/examples/mutex.rs | 97
-rw-r--r--  rust/pin-init/examples/pthread_mutex.rs | 4
-rw-r--r--  rust/pin-init/examples/static_init.rs | 75
-rw-r--r--  rust/pin-init/src/__internal.rs | 1
-rw-r--r--  rust/pin-init/src/lib.rs | 120
-rw-r--r--  rust/pin-init/src/macros.rs | 16
-rw-r--r--  rust/uapi/lib.rs | 3
74 files changed, 2435 insertions(+), 907 deletions(-)
diff --git a/rust/Makefile b/rust/Makefile
index 115b63b7d1e3..4263462b8470 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -34,6 +34,9 @@ obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.o
obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o
always-$(subst y,$(CONFIG_RUST),$(CONFIG_JUMP_LABEL)) += kernel/generated_arch_static_branch_asm.rs
+ifndef CONFIG_UML
+always-$(subst y,$(CONFIG_RUST),$(CONFIG_BUG)) += kernel/generated_arch_warn_asm.rs kernel/generated_arch_reachable_asm.rs
+endif
# Avoids running `$(RUSTC)` when it may not be available.
ifdef CONFIG_RUST
@@ -541,5 +544,10 @@ $(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/build_error.o $(obj)/pin_init.o \
ifdef CONFIG_JUMP_LABEL
$(obj)/kernel.o: $(obj)/kernel/generated_arch_static_branch_asm.rs
endif
+ifndef CONFIG_UML
+ifdef CONFIG_BUG
+$(obj)/kernel.o: $(obj)/kernel/generated_arch_warn_asm.rs $(obj)/kernel/generated_arch_reachable_asm.rs
+endif
+endif
endif # CONFIG_RUST
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index a08eb5518cac..474cc98c48a3 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -25,6 +25,9 @@
)]
#[allow(dead_code)]
+#[allow(clippy::cast_lossless)]
+#[allow(clippy::ptr_as_ptr)]
+#[allow(clippy::ref_as_ptr)]
#[allow(clippy::undocumented_unsafe_blocks)]
#[cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))]
mod bindings_raw {
diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c
index e2d13babc737..a62c96f507d1 100644
--- a/rust/helpers/bug.c
+++ b/rust/helpers/bug.c
@@ -6,3 +6,8 @@ __noreturn void rust_helper_BUG(void)
{
BUG();
}
+
+bool rust_helper_WARN_ON(bool cond)
+{
+ return WARN_ON(cond);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 2bb13285825b..7cf7fe95e41d 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -30,21 +30,22 @@
#include "mutex.c"
#include "of.c"
#include "page.c"
-#include "platform.c"
#include "pci.c"
#include "pid_namespace.c"
+#include "platform.c"
#include "poll.c"
#include "property.c"
#include "rbtree.c"
-#include "regulator.c"
#include "rcu.c"
#include "refcount.c"
+#include "regulator.c"
#include "security.c"
#include "signal.c"
#include "slab.c"
#include "spinlock.c"
#include "sync.c"
#include "task.c"
+#include "time.c"
#include "uaccess.c"
#include "vmalloc.c"
#include "wait.c"
diff --git a/rust/helpers/time.c b/rust/helpers/time.c
new file mode 100644
index 000000000000..a318e9fa4408
--- /dev/null
+++ b/rust/helpers/time.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/timekeeping.h>
+
+void rust_helper_fsleep(unsigned long usecs)
+{
+ fsleep(usecs);
+}
+
+ktime_t rust_helper_ktime_get_real(void)
+{
+ return ktime_get_real();
+}
+
+ktime_t rust_helper_ktime_get_boottime(void)
+{
+ return ktime_get_boottime();
+}
+
+ktime_t rust_helper_ktime_get_clocktai(void)
+{
+ return ktime_get_clocktai();
+}
+
+s64 rust_helper_ktime_to_us(const ktime_t kt)
+{
+ return ktime_to_us(kt);
+}
+
+s64 rust_helper_ktime_to_ms(const ktime_t kt)
+{
+ return ktime_to_ms(kt);
+}
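
These helpers exist because `fsleep()` and the `ktime_*()` accessors are inline functions or macros on the C side, which bindgen cannot bind directly; the wrappers give them callable symbols. A minimal sketch of the Rust-side use, assuming the usual build-time mapping of `rust_helper_*` names into the `bindings` crate (the real wrappers live in rust/kernel/time.rs, which is not shown in this hunk):

    // Illustrative only.
    /// Returns the current CLOCK_REALTIME timestamp in nanoseconds.
    pub fn real_time_ns() -> i64 {
        // SAFETY: `ktime_get_real()` has no preconditions and may be
        // called from any context once timekeeping is up.
        unsafe { bindings::ktime_get_real() }
    }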
diff --git a/rust/kernel/.gitignore b/rust/kernel/.gitignore
index 6ba39a178f30..f636ad95aaf3 100644
--- a/rust/kernel/.gitignore
+++ b/rust/kernel/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
/generated_arch_static_branch_asm.rs
+/generated_arch_warn_asm.rs
+/generated_arch_reachable_asm.rs
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
index d19c06ef0498..a3074480bd8d 100644
--- a/rust/kernel/alloc/allocator_test.rs
+++ b/rust/kernel/alloc/allocator_test.rs
@@ -82,7 +82,7 @@ unsafe impl Allocator for Cmalloc {
// SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or
// exceeds the given size and alignment requirements.
- let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8;
+ let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) }.cast::<u8>();
let dst = NonNull::new(dst).ok_or(AllocError)?;
if flags.contains(__GFP_ZERO) {
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index c386ff771d50..856d05aa60f1 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -6,6 +6,7 @@
use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
use super::{AllocError, Allocator, Flags};
use core::alloc::Layout;
+use core::borrow::{Borrow, BorrowMut};
use core::fmt;
use core::marker::PhantomData;
use core::mem::ManuallyDrop;
@@ -15,6 +16,7 @@ use core::pin::Pin;
use core::ptr::NonNull;
use core::result::Result;
+use crate::ffi::c_void;
use crate::init::InPlaceInit;
use crate::types::ForeignOwnable;
use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
@@ -398,70 +400,74 @@ where
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well-aligned
+// pointer to `T`.
unsafe impl<T: 'static, A> ForeignOwnable for Box<T, A>
where
A: Allocator,
{
- type PointedTo = T;
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<T>();
type Borrowed<'a> = &'a T;
type BorrowedMut<'a> = &'a mut T;
- fn into_foreign(self) -> *mut Self::PointedTo {
- Box::into_raw(self)
+ fn into_foreign(self) -> *mut c_void {
+ Box::into_raw(self).cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- unsafe { Box::from_raw(ptr) }
+ unsafe { Box::from_raw(ptr.cast()) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> &'a T {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> &'a T {
// SAFETY: The safety requirements of this method ensure that the object remains alive and
// immutable for the duration of 'a.
- unsafe { &*ptr }
+ unsafe { &*ptr.cast() }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> &'a mut T {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> &'a mut T {
+ let ptr = ptr.cast();
// SAFETY: The safety requirements of this method ensure that the pointer is valid and that
// nothing else will access the value for the duration of 'a.
unsafe { &mut *ptr }
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well-aligned
+// pointer to `T`.
unsafe impl<T: 'static, A> ForeignOwnable for Pin<Box<T, A>>
where
A: Allocator,
{
- type PointedTo = T;
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<T>();
type Borrowed<'a> = Pin<&'a T>;
type BorrowedMut<'a> = Pin<&'a mut T>;
- fn into_foreign(self) -> *mut Self::PointedTo {
+ fn into_foreign(self) -> *mut c_void {
// SAFETY: We are still treating the box as pinned.
- Box::into_raw(unsafe { Pin::into_inner_unchecked(self) })
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }).cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- unsafe { Pin::new_unchecked(Box::from_raw(ptr)) }
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr.cast())) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> Pin<&'a T> {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> Pin<&'a T> {
// SAFETY: The safety requirements for this function ensure that the object is still alive,
// so it is safe to dereference the raw pointer.
// The safety requirements of `from_foreign` also ensure that the object remains alive for
// the lifetime of the returned value.
- let r = unsafe { &*ptr };
+ let r = unsafe { &*ptr.cast() };
// SAFETY: This pointer originates from a `Pin<Box<T>>`.
unsafe { Pin::new_unchecked(r) }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> Pin<&'a mut T> {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> Pin<&'a mut T> {
+ let ptr = ptr.cast();
// SAFETY: The safety requirements for this function ensure that the object is still alive,
// so it is safe to dereference the raw pointer.
// The safety requirements of `from_foreign` also ensure that the object remains alive for
@@ -499,6 +505,62 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::alloc::KBox;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `KBox`.
+/// let owned_kbox = Foo(KBox::new(1, GFP_KERNEL)?);
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> Borrow<T> for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// # use kernel::alloc::KBox;
+/// struct Foo<B: BorrowMut<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `KBox`.
+/// let owned_kbox = Foo(KBox::new(1, GFP_KERNEL)?);
+///
+/// let mut i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&mut i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> BorrowMut<T> for Box<T, A>
+where
+ T: ?Sized,
+ A: Allocator,
+{
+ fn borrow_mut(&mut self) -> &mut T {
+ self.deref_mut()
+ }
+}
+
impl<T, A> fmt::Display for Box<T, A>
where
T: ?Sized + fmt::Display,
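
With `PointedTo` gone, the foreign representation is an untyped `*mut c_void` and the alignment contract moves into the `FOREIGN_ALIGN` constant. A minimal round-trip sketch under the new interface, assuming the kernel prelude and a `Result`-returning context:

    use kernel::types::ForeignOwnable;

    let b = KBox::new(42u32, GFP_KERNEL)?;
    // The foreign representation is now an untyped `c_void` pointer.
    let raw: *mut kernel::ffi::c_void = b.into_foreign();
    // SAFETY: `raw` came from `into_foreign` above and ownership is
    // reclaimed exactly once.
    let b = unsafe { KBox::<u32>::from_foreign(raw) };
    assert_eq!(*b, 42);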
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 1a0dd852a468..3c72e0bdddb8 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -8,6 +8,7 @@ use super::{
AllocError, Allocator, Box, Flags,
};
use core::{
+ borrow::{Borrow, BorrowMut},
fmt,
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
@@ -288,7 +289,7 @@ where
// - `self.len` is smaller than `self.capacity` by the type invariant and hence, the
// resulting pointer is guaranteed to be part of the same allocated object.
// - `self.len` can not overflow `isize`.
- let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit<T>;
+ let ptr = unsafe { self.as_mut_ptr().add(self.len) }.cast::<MaybeUninit<T>>();
// SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated
// and valid, but uninitialized.
@@ -847,11 +848,11 @@ where
// - `ptr` points to memory with at least a size of `size_of::<T>() * len`,
// - all elements within `b` are initialized values of `T`,
// - `len` does not exceed `isize::MAX`.
- unsafe { Vec::from_raw_parts(ptr as _, len, len) }
+ unsafe { Vec::from_raw_parts(ptr.cast(), len, len) }
}
}
-impl<T> Default for KVec<T> {
+impl<T, A: Allocator> Default for Vec<T, A> {
#[inline]
fn default() -> Self {
Self::new()
@@ -890,6 +891,58 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// struct Foo<B: Borrow<[u32]>>(B);
+///
+/// // Owned array.
+/// let owned_array = Foo([1, 2, 3]);
+///
+/// // Owned vector.
+/// let owned_vec = Foo(KVec::from_elem(0, 3, GFP_KERNEL)?);
+///
+/// let arr = [1, 2, 3];
+/// // Borrowed slice from `arr`.
+/// let borrowed_slice = Foo(&arr[..]);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> Borrow<[T]> for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn borrow(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// struct Foo<B: BorrowMut<[u32]>>(B);
+///
+/// // Owned array.
+/// let owned_array = Foo([1, 2, 3]);
+///
+/// // Owned vector.
+/// let owned_vec = Foo(KVec::from_elem(0, 3, GFP_KERNEL)?);
+///
+/// let mut arr = [1, 2, 3];
+/// // Borrowed slice from `arr`.
+/// let borrowed_slice = Foo(&mut arr[..]);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T, A> BorrowMut<[T]> for Vec<T, A>
+where
+ A: Allocator,
+{
+ fn borrow_mut(&mut self) -> &mut [T] {
+ self.as_mut_slice()
+ }
+}
+
impl<T: Eq, A> Eq for Vec<T, A> where A: Allocator {}
impl<T, I: SliceIndex<[T]>, A> Index<I> for Vec<T, A>
diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs
new file mode 100644
index 000000000000..553d50265883
--- /dev/null
+++ b/rust/kernel/bits.rs
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bit manipulation macros.
+//!
+//! C header: [`include/linux/bits.h`](srctree/include/linux/bits.h)
+
+use crate::prelude::*;
+use core::ops::RangeInclusive;
+use macros::paste;
+
+macro_rules! impl_bit_fn {
+ (
+ $ty:ty
+ ) => {
+ paste! {
+ /// Computes `1 << n` if `n` is in bounds, i.e.: if `n` is smaller than
+ /// the maximum number of bits supported by the type.
+ ///
+ /// Returns [`None`] otherwise.
+ #[inline]
+ pub fn [<checked_bit_ $ty>](n: u32) -> Option<$ty> {
+ (1 as $ty).checked_shl(n)
+ }
+
+ /// Computes `1 << n` by performing a compile-time assertion that `n` is
+ /// in bounds.
+ ///
+ /// This version is the default and should be used if `n` is known at
+ /// compile time.
+ #[inline]
+ pub const fn [<bit_ $ty>](n: u32) -> $ty {
+ build_assert!(n < <$ty>::BITS);
+ (1 as $ty) << n
+ }
+ }
+ };
+}
+
+impl_bit_fn!(u64);
+impl_bit_fn!(u32);
+impl_bit_fn!(u16);
+impl_bit_fn!(u8);
+
+macro_rules! impl_genmask_fn {
+ (
+ $ty:ty,
+ $(#[$genmask_checked_ex:meta])*,
+ $(#[$genmask_ex:meta])*
+ ) => {
+ paste! {
+ /// Creates a contiguous bitmask for the given range by validating
+ /// the range at runtime.
+ ///
+ /// Returns [`None`] if the range is invalid, i.e.: if the start is
+ /// greater than the end or if the range is outside of the
+ /// representable range for the type.
+ $(#[$genmask_checked_ex])*
+ #[inline]
+ pub fn [<genmask_checked_ $ty>](range: RangeInclusive<u32>) -> Option<$ty> {
+ let start = *range.start();
+ let end = *range.end();
+
+ if start > end {
+ return None;
+ }
+
+ let high = [<checked_bit_ $ty>](end)?;
+ let low = [<checked_bit_ $ty>](start)?;
+ Some((high | (high - 1)) & !(low - 1))
+ }
+
+ /// Creates a compile-time contiguous bitmask for the given range by
+ /// performing a compile-time assertion that the range is valid.
+ ///
+ /// This version is the default and should be used if the range is known
+ /// at compile time.
+ $(#[$genmask_ex])*
+ #[inline]
+ pub const fn [<genmask_ $ty>](range: RangeInclusive<u32>) -> $ty {
+ let start = *range.start();
+ let end = *range.end();
+
+ build_assert!(start <= end);
+
+ let high = [<bit_ $ty>](end);
+ let low = [<bit_ $ty>](start);
+ (high | (high - 1)) & !(low - 1)
+ }
+ }
+ };
+}
+
+impl_genmask_fn!(
+ u64,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u64;
+ /// assert_eq!(genmask_checked_u64(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u64(0..=63), Some(u64::MAX));
+ /// assert_eq!(genmask_checked_u64(21..=39), Some(0x0000_00ff_ffe0_0000));
+ ///
+ /// // `80` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u64(21..=80), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u64(15..=8), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u64;
+ /// assert_eq!(genmask_u64(21..=39), 0x0000_00ff_ffe0_0000);
+ /// assert_eq!(genmask_u64(0..=0), 0b1);
+ /// assert_eq!(genmask_u64(0..=63), u64::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u32,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u32;
+ /// assert_eq!(genmask_checked_u32(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u32(0..=31), Some(u32::MAX));
+ /// assert_eq!(genmask_checked_u32(21..=31), Some(0xffe0_0000));
+ ///
+ /// // `40` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u32(21..=40), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u32(15..=8), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u32;
+ /// assert_eq!(genmask_u32(21..=31), 0xffe0_0000);
+ /// assert_eq!(genmask_u32(0..=0), 0b1);
+ /// assert_eq!(genmask_u32(0..=31), u32::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u16,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u16;
+ /// assert_eq!(genmask_checked_u16(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u16(0..=15), Some(u16::MAX));
+ /// assert_eq!(genmask_checked_u16(6..=15), Some(0xffc0));
+ ///
+ /// // `20` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u16(6..=20), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u16(10..=5), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u16;
+ /// assert_eq!(genmask_u16(6..=15), 0xffc0);
+ /// assert_eq!(genmask_u16(0..=0), 0b1);
+ /// assert_eq!(genmask_u16(0..=15), u16::MAX);
+ /// ```
+);
+
+impl_genmask_fn!(
+ u8,
+ /// # Examples
+ ///
+ /// ```
+ /// # #![expect(clippy::reversed_empty_ranges)]
+ /// # use kernel::bits::genmask_checked_u8;
+ /// assert_eq!(genmask_checked_u8(0..=0), Some(0b1));
+ /// assert_eq!(genmask_checked_u8(0..=7), Some(u8::MAX));
+ /// assert_eq!(genmask_checked_u8(6..=7), Some(0xc0));
+ ///
+ /// // `10` is out of the supported bit range.
+ /// assert_eq!(genmask_checked_u8(6..=10), None);
+ ///
+ /// // Invalid range where the start is bigger than the end.
+ /// assert_eq!(genmask_checked_u8(5..=2), None);
+ /// ```
+ ,
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::bits::genmask_u8;
+ /// assert_eq!(genmask_u8(6..=7), 0xc0);
+ /// assert_eq!(genmask_u8(0..=0), 0b1);
+ /// assert_eq!(genmask_u8(0..=7), u8::MAX);
+ /// ```
+);
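
The mask construction is worth a worked example: `high | (high - 1)` sets every bit from 0 through `end`, and masking with `!(low - 1)` then clears the bits below `start`. For `genmask_u8(2..=4)`:

    use kernel::bits::genmask_u8;

    // high = bit_u8(4)    = 0b0001_0000
    // high | (high - 1)   = 0b0001_1111  (bits 0..=4 set)
    // low  = bit_u8(2)    = 0b0000_0100
    // !(low - 1)          = 0b1111_1100  (clears bits 0 and 1)
    // result              = 0b0001_1100  = 0x1c
    assert_eq!(genmask_u8(2..=4), 0x1c);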
diff --git a/rust/kernel/block/mq.rs b/rust/kernel/block/mq.rs
index fb0f393c1cea..831445d37181 100644
--- a/rust/kernel/block/mq.rs
+++ b/rust/kernel/block/mq.rs
@@ -53,7 +53,7 @@
//! [`GenDiskBuilder`]: gen_disk::GenDiskBuilder
//! [`GenDiskBuilder::build`]: gen_disk::GenDiskBuilder::build
//!
-//! # Example
+//! # Examples
//!
//! ```rust
//! use kernel::{
diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs
index 864ff379dc91..c2b98f507bcb 100644
--- a/rust/kernel/block/mq/operations.rs
+++ b/rust/kernel/block/mq/operations.rs
@@ -101,7 +101,7 @@ impl<T: Operations> OperationsVTable<T> {
if let Err(e) = ret {
e.to_blk_status()
} else {
- bindings::BLK_STS_OK as _
+ bindings::BLK_STS_OK as bindings::blk_status_t
}
}
diff --git a/rust/kernel/block/mq/request.rs b/rust/kernel/block/mq/request.rs
index 4a5b7ec914ef..fefd394f064a 100644
--- a/rust/kernel/block/mq/request.rs
+++ b/rust/kernel/block/mq/request.rs
@@ -69,7 +69,7 @@ impl<T: Operations> Request<T> {
// INVARIANT: By the safety requirements of this function, invariants are upheld.
// SAFETY: By the safety requirement of this function, we own a
// reference count that we can pass to `ARef`.
- unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
+ unsafe { ARef::from_raw(NonNull::new_unchecked(ptr.cast())) }
}
/// Notify the block layer that a request is going to be processed now.
@@ -125,7 +125,12 @@ impl<T: Operations> Request<T> {
// success of the call to `try_set_end` guarantees that there are no
// `ARef`s pointing to this request. Therefore it is safe to hand it
// back to the block layer.
- unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
+ unsafe {
+ bindings::blk_mq_end_request(
+ request_ptr,
+ bindings::BLK_STS_OK as bindings::blk_status_t,
+ )
+ };
Ok(())
}
@@ -155,7 +160,7 @@ impl<T: Operations> Request<T> {
// the private data associated with this request is initialized and
// valid. The existence of `&self` guarantees that the private data is
// valid as a shared reference.
- unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
+ unsafe { Self::wrapper_ptr(core::ptr::from_ref(self).cast_mut()).as_ref() }
}
}
diff --git a/rust/kernel/bug.rs b/rust/kernel/bug.rs
new file mode 100644
index 000000000000..36aef43e5ebe
--- /dev/null
+++ b/rust/kernel/bug.rs
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024, 2025 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! Support for BUG and WARN functionality.
+//!
+//! C header: [`include/asm-generic/bug.h`](srctree/include/asm-generic/bug.h)
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
+#[cfg(CONFIG_DEBUG_BUGVERBOSE)]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
+ const _FILE: &[u8] = file!().as_bytes();
+ // Plus one for null-terminator.
+ static FILE: [u8; _FILE.len() + 1] = {
+ let mut bytes = [0; _FILE.len() + 1];
+ let mut i = 0;
+ while i < _FILE.len() {
+ bytes[i] = _FILE[i];
+ i += 1;
+ }
+ bytes
+ };
+
+ // SAFETY:
+ // - `file`, `line`, `flags`, and `size` are all compile-time constants or
+ // symbols, preventing any invalid memory access.
+ // - The asm block has no side effects and does not modify any registers
+ // or memory. It is purely for embedding metadata into the ELF section.
+ unsafe {
+ $crate::asm!(
+ concat!(
+ "/* {size} */",
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_warn_asm.rs")),
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_reachable_asm.rs")));
+ file = sym FILE,
+ line = const line!(),
+ flags = const FLAGS,
+ size = const ::core::mem::size_of::<$crate::bindings::bug_entry>(),
+ );
+ }
+ }
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
+#[cfg(not(CONFIG_DEBUG_BUGVERBOSE))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
+
+ // SAFETY:
+ // - `flags` and `size` are all compile-time constants, preventing
+ // any invalid memory access.
+ // - The asm block has no side effects and does not modify any registers
+ // or memory. It is purely for embedding metadata into the ELF section.
+ unsafe {
+ $crate::asm!(
+ concat!(
+ "/* {size} */",
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_warn_asm.rs")),
+ include!(concat!(env!("OBJTREE"), "/rust/kernel/generated_arch_reachable_asm.rs")));
+ flags = const FLAGS,
+ size = const ::core::mem::size_of::<$crate::bindings::bug_entry>(),
+ );
+ }
+ }
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, CONFIG_UML))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ // SAFETY: It is always safe to call `warn_slowpath_fmt()`
+ // with a valid null-terminated string.
+ unsafe {
+ $crate::bindings::warn_slowpath_fmt(
+ $crate::c_str!(::core::file!()).as_char_ptr(),
+ line!() as $crate::ffi::c_int,
+ $flags as $crate::ffi::c_uint,
+ ::core::ptr::null(),
+ );
+ }
+ };
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(all(CONFIG_BUG, any(CONFIG_LOONGARCH, CONFIG_ARM)))]
+macro_rules! warn_flags {
+ ($flags:expr) => {
+ // SAFETY: It is always safe to call `WARN_ON()`.
+ unsafe { $crate::bindings::WARN_ON(true) }
+ };
+}
+
+#[macro_export]
+#[doc(hidden)]
+#[cfg(not(CONFIG_BUG))]
+macro_rules! warn_flags {
+ ($flags:expr) => {};
+}
+
+#[doc(hidden)]
+pub const fn bugflag_taint(value: u32) -> u32 {
+ value << 8
+}
+
+/// Report a warning if `cond` is true and return the condition's evaluation result.
+#[macro_export]
+macro_rules! warn_on {
+ ($cond:expr) => {{
+ let cond = $cond;
+ if cond {
+ const WARN_ON_FLAGS: u32 = $crate::bug::bugflag_taint($crate::bindings::TAINT_WARN);
+
+ $crate::warn_flags!(WARN_ON_FLAGS);
+ }
+ cond
+ }};
+}
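
Like C's `WARN_ON()`, the macro taints with `TAINT_WARN` where the architecture supports it and yields the condition, so callers can branch on it. A minimal usage sketch, assuming a `Result`-returning context:

    let buf_len = 0;
    if kernel::warn_on!(buf_len == 0) {
        // The warning has already been reported; fail gracefully.
        return Err(EINVAL);
    }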
diff --git a/rust/kernel/clk.rs b/rust/kernel/clk.rs
index fbcea31dbcca..1e6c8c42fb3a 100644
--- a/rust/kernel/clk.rs
+++ b/rust/kernel/clk.rs
@@ -12,7 +12,7 @@ use crate::ffi::c_ulong;
///
/// Represents a frequency in hertz, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::clk::Hertz;
@@ -99,7 +99,7 @@ mod common_clk {
/// Instances of this type are reference-counted. Calling [`Clk::get`] ensures that the
/// allocation remains valid for the lifetime of the [`Clk`].
///
- /// ## Examples
+ /// # Examples
///
/// The following example demonstrates how to obtain and configure a clock for a device.
///
@@ -266,7 +266,7 @@ mod common_clk {
/// Instances of this type are reference-counted. Calling [`OptionalClk::get`] ensures that the
/// allocation remains valid for the lifetime of the [`OptionalClk`].
///
- /// ## Examples
+ /// # Examples
///
/// The following example demonstrates how to obtain and configure an optional clock for a
/// device. The code functions correctly whether or not the clock is available.
diff --git a/rust/kernel/configfs.rs b/rust/kernel/configfs.rs
index 34d0bea4f9a5..2736b798cdc6 100644
--- a/rust/kernel/configfs.rs
+++ b/rust/kernel/configfs.rs
@@ -17,7 +17,7 @@
//!
//! C header: [`include/linux/configfs.h`](srctree/include/linux/configfs.h)
//!
-//! # Example
+//! # Examples
//!
//! ```ignore
//! use kernel::alloc::flags;
@@ -151,7 +151,7 @@ impl<Data> Subsystem<Data> {
data: impl PinInit<Data, Error>,
) -> impl PinInit<Self, Error> {
try_pin_init!(Self {
- subsystem <- pin_init::zeroed().chain(
+ subsystem <- pin_init::init_zeroed().chain(
|place: &mut Opaque<bindings::configfs_subsystem>| {
// SAFETY: We initialized the required fields of `place.group` above.
unsafe {
@@ -261,7 +261,7 @@ impl<Data> Group<Data> {
data: impl PinInit<Data, Error>,
) -> impl PinInit<Self, Error> {
try_pin_init!(Self {
- group <- pin_init::zeroed().chain(|v: &mut Opaque<bindings::config_group>| {
+ group <- pin_init::init_zeroed().chain(|v: &mut Opaque<bindings::config_group>| {
let place = v.get();
let name = name.as_bytes_with_nul().as_ptr();
// SAFETY: It is safe to initialize a group once it has been zeroed.
@@ -279,7 +279,7 @@ impl<Data> Group<Data> {
// within the `group` field.
unsafe impl<Data> HasGroup<Data> for Group<Data> {
unsafe fn group(this: *const Self) -> *const bindings::config_group {
- Opaque::raw_get(
+ Opaque::cast_into(
// SAFETY: By impl and function safety requirements this field
// projection is within bounds of the allocation.
unsafe { &raw const (*this).group },
@@ -426,7 +426,7 @@ where
};
const fn vtable_ptr() -> *const bindings::configfs_group_operations {
- &Self::VTABLE as *const bindings::configfs_group_operations
+ &Self::VTABLE
}
}
@@ -464,7 +464,7 @@ where
};
const fn vtable_ptr() -> *const bindings::configfs_item_operations {
- &Self::VTABLE as *const bindings::configfs_item_operations
+ &Self::VTABLE
}
}
@@ -476,7 +476,7 @@ impl<Data> ItemOperationsVTable<Subsystem<Data>, Data> {
};
const fn vtable_ptr() -> *const bindings::configfs_item_operations {
- &Self::VTABLE as *const bindings::configfs_item_operations
+ &Self::VTABLE
}
}
@@ -561,7 +561,7 @@ where
let data: &Data = unsafe { get_group_data(c_group) };
// SAFETY: By function safety requirements, `page` is writable for `PAGE_SIZE`.
- let ret = O::show(data, unsafe { &mut *(page as *mut [u8; PAGE_SIZE]) });
+ let ret = O::show(data, unsafe { &mut *(page.cast::<[u8; PAGE_SIZE]>()) });
match ret {
Ok(size) => size as isize,
@@ -717,11 +717,7 @@ impl<const N: usize, Data> AttributeList<N, Data> {
// SAFETY: By function safety requirements, we have exclusive access to
// `self` and the reference created below will be exclusive.
- unsafe {
- (&mut *self.0.get())[I] = (attribute as *const Attribute<ID, O, Data>)
- .cast_mut()
- .cast()
- };
+ unsafe { (&mut *self.0.get())[I] = core::ptr::from_ref(attribute).cast_mut().cast() };
}
}
@@ -761,9 +757,7 @@ macro_rules! impl_item_type {
ct_owner: owner.as_ptr(),
ct_group_ops: GroupOperationsVTable::<Data, Child>::vtable_ptr().cast_mut(),
ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
- ct_attrs: (attributes as *const AttributeList<N, Data>)
- .cast_mut()
- .cast(),
+ ct_attrs: core::ptr::from_ref(attributes).cast_mut().cast(),
ct_bin_attrs: core::ptr::null_mut(),
}),
_p: PhantomData,
@@ -780,9 +774,7 @@ macro_rules! impl_item_type {
ct_owner: owner.as_ptr(),
ct_group_ops: core::ptr::null_mut(),
ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(),
- ct_attrs: (attributes as *const AttributeList<N, Data>)
- .cast_mut()
- .cast(),
+ ct_attrs: core::ptr::from_ref(attributes).cast_mut().cast(),
ct_bin_attrs: core::ptr::null_mut(),
}),
_p: PhantomData,
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index d0ea24236ae4..afc15e72a7c3 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -202,7 +202,7 @@ impl From<TableIndex> for usize {
/// The callers must ensure that the `struct cpufreq_frequency_table` is valid for access and
/// remains valid for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to read a frequency value from [`Table`].
///
@@ -318,7 +318,7 @@ impl Deref for TableBox {
///
/// This is used by the CPU frequency drivers to build a frequency table dynamically.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create a CPU frequency table.
///
@@ -395,7 +395,7 @@ impl TableBuilder {
/// The callers must ensure that the `struct cpufreq_policy` is valid for access and remains valid
/// for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create a CPU frequency table.
///
@@ -649,7 +649,7 @@ impl Policy {
fn set_data<T: ForeignOwnable>(&mut self, data: T) -> Result {
if self.as_ref().driver_data.is_null() {
// Transfer the ownership of the data to the foreign interface.
- self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data) as _;
+ self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data).cast();
Ok(())
} else {
Err(EBUSY)
@@ -834,7 +834,7 @@ pub trait Driver {
/// CPU frequency driver Registration.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to register a cpufreq driver.
///
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
index e07f8ff5e3fd..3fcbff438670 100644
--- a/rust/kernel/cpumask.rs
+++ b/rust/kernel/cpumask.rs
@@ -27,7 +27,7 @@ use core::ops::{Deref, DerefMut};
/// The callers must ensure that the `struct cpumask` is valid for access and
/// remains valid for the lifetime of the returned reference.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to update a [`Cpumask`].
///
@@ -172,7 +172,7 @@ impl Cpumask {
/// The callers must ensure that the `struct cpumask_var_t` is valid for access and remains valid
/// for the lifetime of [`CpumaskVar`].
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create and update a [`CpumaskVar`].
///
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index ca82926fd67f..b8613289de8e 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -262,10 +262,10 @@ impl<Ctx: DeviceContext> Device<Ctx> {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_dev_printk(
- klevel as *const _ as *const crate::ffi::c_char,
+ klevel.as_ptr().cast::<crate::ffi::c_char>(),
self.as_raw(),
c_str!("%pA").as_char_ptr(),
- &msg as *const _ as *const crate::ffi::c_void,
+ core::ptr::from_ref(&msg).cast::<crate::ffi::c_void>(),
)
};
}
diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs
index 8ed2c946144c..70d57814ff79 100644
--- a/rust/kernel/device_id.rs
+++ b/rust/kernel/device_id.rs
@@ -100,7 +100,7 @@ impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
unsafe {
raw_ids[i]
.as_mut_ptr()
- .byte_offset(data_offset as _)
+ .byte_add(data_offset)
.cast::<usize>()
.write(i);
}
@@ -177,7 +177,7 @@ impl<T: RawDeviceId, U, const N: usize> IdTable<T, U> for IdArray<T, U, N> {
fn as_ptr(&self) -> *const T::RawType {
// This cannot be `self.ids.as_ptr()`, as the return pointer must have correct provenance
// to access the sentinel.
- (self as *const Self).cast()
+ core::ptr::from_ref(self).cast()
}
fn id(&self, index: usize) -> &T::RawType {
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 152a89b78943..da18091143a6 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -49,7 +49,7 @@ struct Inner<T: Send> {
/// [`Devres`] users should make sure to simply free the corresponding backing resource in `T`'s
/// [`Drop`] implementation.
///
-/// # Example
+/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}};
@@ -66,19 +66,19 @@ struct Inner<T: Send> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
/// fn drop(&mut self) {
/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
/// }
/// }
///
@@ -219,7 +219,7 @@ impl<T: Send> Devres<T> {
/// An error is returned if `dev` does not match the same [`Device`] this [`Devres`] instance
/// has been created with.
///
- /// # Example
+ /// # Examples
///
/// ```no_run
/// # #![cfg(CONFIG_PCI)]
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index b320779ea26f..2bc8ab51ec28 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -180,7 +180,7 @@ pub struct Attrs(u32);
impl Attrs {
/// Get the raw representation of this attribute.
pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
- self.0 as _
+ self.0 as crate::ffi::c_ulong
}
/// Check whether `flags` is contained in `self`.
@@ -333,7 +333,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
dev: dev.into(),
dma_handle,
count,
- cpu_addr: ret as *mut T,
+ cpu_addr: ret.cast::<T>(),
dma_attrs,
})
}
@@ -436,7 +436,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// slice is live.
/// * Callers must ensure that this call does not race with a read or write to the same region
/// while the returned slice is live.
- pub unsafe fn as_slice_mut(&self, offset: usize, count: usize) -> Result<&mut [T]> {
+ pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
self.validate_range(offset, count)?;
// SAFETY:
// - The pointer is valid due to type invariant on `CoherentAllocation`,
@@ -468,7 +468,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// unsafe { alloc.write(buf, 0)?; }
/// # Ok::<(), Error>(()) }
/// ```
- pub unsafe fn write(&self, src: &[T], offset: usize) -> Result {
+ pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
self.validate_range(offset, src.len())?;
// SAFETY:
// - The pointer is valid due to type invariant on `CoherentAllocation`
@@ -556,7 +556,7 @@ impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
bindings::dma_free_attrs(
self.dev.as_raw(),
size,
- self.cpu_addr as _,
+ self.cpu_addr.cast(),
self.dma_handle,
self.dma_attrs.as_raw(),
)
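
Changing `as_slice_mut()` and `write()` to take `&mut self` surfaces the exclusivity requirement to the borrow checker instead of leaving it solely to the safety comments. A caller-side sketch, with the allocation elided as in the `write` doc example above:

    let mut alloc: CoherentAllocation<u8> = /* allocated as in the doc example */;
    let src = [1u8, 2, 3, 4];
    // SAFETY: no device access races with this CPU write; `&mut self`
    // already excludes other CPU-side users.
    unsafe { alloc.write(&src, 0)? };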
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
index 32029fde55eb..3bb7c83966cf 100644
--- a/rust/kernel/drm/device.rs
+++ b/rust/kernel/drm/device.rs
@@ -83,13 +83,13 @@ impl<T: drm::Driver> Device<T> {
major: T::INFO.major,
minor: T::INFO.minor,
patchlevel: T::INFO.patchlevel,
- name: T::INFO.name.as_char_ptr() as *mut _,
- desc: T::INFO.desc.as_char_ptr() as *mut _,
+ name: T::INFO.name.as_char_ptr().cast_mut(),
+ desc: T::INFO.desc.as_char_ptr().cast_mut(),
driver_features: drm::driver::FEAT_GEM,
ioctls: T::IOCTLS.as_ptr(),
num_ioctls: T::IOCTLS.len() as i32,
- fops: &Self::GEM_FOPS as _,
+ fops: &Self::GEM_FOPS,
};
const GEM_FOPS: bindings::file_operations = drm::gem::create_fops();
@@ -135,11 +135,9 @@ impl<T: drm::Driver> Device<T> {
///
/// `ptr` must be a valid pointer to a `struct device` embedded in `Self`.
unsafe fn from_drm_device(ptr: *const bindings::drm_device) -> *mut Self {
- let ptr: *const Opaque<bindings::drm_device> = ptr.cast();
-
// SAFETY: By the safety requirements of this function `ptr` is a valid pointer to a
// `struct drm_device` embedded in `Self`.
- unsafe { crate::container_of!(ptr, Self, dev) }.cast_mut()
+ unsafe { crate::container_of!(Opaque::cast_from(ptr), Self, dev) }.cast_mut()
}
/// Not intended to be called externally, except via declare_drm_ioctls!()
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index a24c9a2fc201..b71821cfb5ea 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -125,11 +125,9 @@ impl<T: DriverObject> IntoGEMObject for Object<T> {
}
unsafe fn from_raw<'a>(self_ptr: *mut bindings::drm_gem_object) -> &'a Self {
- let self_ptr: *mut Opaque<bindings::drm_gem_object> = self_ptr.cast();
-
// SAFETY: `obj` is guaranteed to be in an `Object<T>` via the safety contract of this
// function
- unsafe { &*crate::container_of!(self_ptr, Object<T>, obj) }
+ unsafe { &*crate::container_of!(Opaque::cast_from(self_ptr), Object<T>, obj) }
}
}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 083c7b068cf4..a41de293dcd1 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -6,10 +6,10 @@
use crate::{
alloc::{layout::LayoutError, AllocError},
+ fmt,
str::CStr,
};
-use core::fmt;
use core::num::NonZeroI32;
use core::num::TryFromIntError;
use core::str::Utf8Error;
@@ -154,7 +154,7 @@ impl Error {
/// Returns the error encoded as a pointer.
pub fn to_ptr<T>(self) -> *mut T {
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ }
+ unsafe { bindings::ERR_PTR(self.0.get() as crate::ffi::c_long).cast() }
}
/// Returns a string representing the error, if one exists.
@@ -189,7 +189,7 @@ impl fmt::Debug for Error {
Some(name) => f
.debug_tuple(
// SAFETY: These strings are ASCII-only.
- unsafe { core::str::from_utf8_unchecked(name) },
+ unsafe { core::str::from_utf8_unchecked(name.to_bytes()) },
)
.finish(),
}
@@ -220,8 +220,8 @@ impl From<LayoutError> for Error {
}
}
-impl From<core::fmt::Error> for Error {
- fn from(_: core::fmt::Error) -> Error {
+impl From<fmt::Error> for Error {
+ fn from(_: fmt::Error) -> Error {
code::EINVAL
}
}
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
index 4fe621f35716..1abab5b2f052 100644
--- a/rust/kernel/firmware.rs
+++ b/rust/kernel/firmware.rs
@@ -62,10 +62,11 @@ impl Firmware {
fn request_internal(name: &CStr, dev: &Device, func: FwFunc) -> Result<Self> {
let mut fw: *mut bindings::firmware = core::ptr::null_mut();
let pfw: *mut *mut bindings::firmware = &mut fw;
+ let pfw: *mut *const bindings::firmware = pfw.cast();
// SAFETY: `pfw` is a valid pointer to a NULL initialized `bindings::firmware` pointer.
// `name` and `dev` are valid as by their type invariants.
- let ret = unsafe { func.0(pfw as _, name.as_char_ptr(), dev.as_raw()) };
+ let ret = unsafe { func.0(pfw, name.as_char_ptr(), dev.as_raw()) };
if ret != 0 {
return Err(Error::from_errno(ret));
}
@@ -139,7 +140,7 @@ unsafe impl Sync for Firmware {}
/// Typically, such contracts would be enforced by a trait, however traits do not (yet) support
/// const functions.
///
-/// # Example
+/// # Examples
///
/// ```
/// # mod module_firmware_test {
@@ -181,7 +182,7 @@ unsafe impl Sync for Firmware {}
/// module! {
/// type: MyModule,
/// name: "module_firmware_test",
-/// author: "Rust for Linux",
+/// authors: ["Rust for Linux"],
/// description: "module_firmware! test module",
/// license: "GPL",
/// }
@@ -261,7 +262,7 @@ impl<const N: usize> ModInfoBuilder<N> {
/// Append path components to the [`ModInfoBuilder`] instance. Paths need to be separated
/// with [`ModInfoBuilder::new_entry`].
///
- /// # Example
+ /// # Examples
///
/// ```
/// use kernel::firmware::ModInfoBuilder;
diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs
new file mode 100644
index 000000000000..0306e8388968
--- /dev/null
+++ b/rust/kernel/fmt.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Formatting utilities.
+//!
+//! This module is intended to be used in place of `core::fmt` in kernel code.
+
+pub use core::fmt::{Arguments, Debug, Display, Error, Formatter, Result, Write};
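
The module is a thin indirection point: importing formatting items from `kernel::fmt` rather than `core::fmt` leaves room to swap in kernel-specific formatting machinery later without touching users. A sketch of the intended import style:

    use kernel::fmt;

    struct Hex(u32);

    impl fmt::Display for Hex {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{:#x}", self.0)
        }
    }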
diff --git a/rust/kernel/fs/file.rs b/rust/kernel/fs/file.rs
index 72d84fb0e266..35fd5db35c46 100644
--- a/rust/kernel/fs/file.rs
+++ b/rust/kernel/fs/file.rs
@@ -366,7 +366,7 @@ impl core::ops::Deref for File {
//
// By the type invariants, there are no `fdget_pos` calls that did not take the
// `f_pos_lock` mutex.
- unsafe { LocalFile::from_raw_file(self as *const File as *const bindings::file) }
+ unsafe { LocalFile::from_raw_file(core::ptr::from_ref(self).cast()) }
}
}
diff --git a/rust/kernel/generated_arch_reachable_asm.rs.S b/rust/kernel/generated_arch_reachable_asm.rs.S
new file mode 100644
index 000000000000..3886a9ad3a99
--- /dev/null
+++ b/rust/kernel/generated_arch_reachable_asm.rs.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/bug.h>
+
+// Cut here.
+
+::kernel::concat_literals!(ARCH_WARN_REACHABLE)
diff --git a/rust/kernel/generated_arch_warn_asm.rs.S b/rust/kernel/generated_arch_warn_asm.rs.S
new file mode 100644
index 000000000000..409eb4c2d3a1
--- /dev/null
+++ b/rust/kernel/generated_arch_warn_asm.rs.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/bug.h>
+
+// Cut here.
+
+::kernel::concat_literals!(ARCH_WARN_ASM("{file}", "{line}", "{flags}", "{size}"))
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 21ef202ab0db..4949047af8d7 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -29,15 +29,15 @@
//!
//! ## General Examples
//!
-//! ```rust,ignore
-//! # #![allow(clippy::disallowed_names)]
+//! ```rust
+//! # #![expect(clippy::disallowed_names, clippy::undocumented_unsafe_blocks)]
//! use kernel::types::Opaque;
//! use pin_init::pin_init_from_closure;
//!
//! // assume we have some `raw_foo` type in C:
//! #[repr(C)]
//! struct RawFoo([u8; 16]);
-//! extern {
+//! extern "C" {
//! fn init_foo(_: *mut RawFoo);
//! }
//!
@@ -66,25 +66,17 @@
//! });
//! ```
//!
-//! ```rust,ignore
-//! # #![allow(unreachable_pub, clippy::disallowed_names)]
+//! ```rust
+//! # #![expect(unreachable_pub, clippy::disallowed_names)]
//! use kernel::{prelude::*, types::Opaque};
//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
//! # mod bindings {
-//! # #![allow(non_camel_case_types)]
+//! # #![expect(non_camel_case_types, clippy::missing_safety_doc)]
//! # pub struct foo;
//! # pub unsafe fn init_foo(_ptr: *mut foo) {}
//! # pub unsafe fn destroy_foo(_ptr: *mut foo) {}
//! # pub unsafe fn enable_foo(_ptr: *mut foo, _flags: u32) -> i32 { 0 }
//! # }
-//! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround.
-//! # trait FromErrno {
-//! # fn from_errno(errno: core::ffi::c_int) -> Error {
-//! # // Dummy error that can be constructed outside the `kernel` crate.
-//! # Error::from(core::fmt::Error)
-//! # }
-//! # }
-//! # impl FromErrno for Error {}
//! /// # Invariants
//! ///
//! /// `foo` is always initialized
@@ -108,13 +100,13 @@
//! let foo = addr_of_mut!((*slot).foo);
//!
//! // Initialize the `foo`
-//! bindings::init_foo(Opaque::raw_get(foo));
+//! bindings::init_foo(Opaque::cast_into(foo));
//!
//! // Try to enable it.
-//! let err = bindings::enable_foo(Opaque::raw_get(foo), flags);
+//! let err = bindings::enable_foo(Opaque::cast_into(foo), flags);
//! if err != 0 {
//! // Enabling has failed, first clean up the foo and then return the error.
-//! bindings::destroy_foo(Opaque::raw_get(foo));
+//! bindings::destroy_foo(Opaque::cast_into(foo));
//! return Err(Error::from_errno(err));
//! }
//!
@@ -206,7 +198,7 @@ pub trait InPlaceInit<T>: Sized {
///
/// ```rust
/// use kernel::error::Error;
-/// use pin_init::zeroed;
+/// use pin_init::init_zeroed;
/// struct BigBuf {
/// big: KBox<[u8; 1024 * 1024 * 1024]>,
/// small: [u8; 1024 * 1024],
@@ -215,7 +207,7 @@ pub trait InPlaceInit<T>: Sized {
/// impl BigBuf {
/// fn new() -> impl Init<Self, Error> {
/// try_init!(Self {
-/// big: KBox::init(zeroed(), GFP_KERNEL)?,
+/// big: KBox::init(init_zeroed(), GFP_KERNEL)?,
/// small: [0; 1024 * 1024],
/// }? Error)
/// }
@@ -264,7 +256,7 @@ macro_rules! try_init {
/// ```rust
/// # #![feature(new_uninit)]
/// use kernel::error::Error;
-/// use pin_init::zeroed;
+/// use pin_init::init_zeroed;
/// #[pin_data]
/// struct BigBuf {
/// big: KBox<[u8; 1024 * 1024 * 1024]>,
@@ -275,7 +267,7 @@ macro_rules! try_init {
/// impl BigBuf {
/// fn new() -> impl PinInit<Self, Error> {
/// try_pin_init!(Self {
-/// big: KBox::init(zeroed(), GFP_KERNEL)?,
+/// big: KBox::init(init_zeroed(), GFP_KERNEL)?,
/// small: [0; 1024 * 1024],
/// ptr: core::ptr::null_mut(),
/// }? Error)
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index b7fc759f8b5d..03b467722b86 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -5,7 +5,7 @@
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
use crate::error::{code::EINVAL, Result};
-use crate::{bindings, build_assert};
+use crate::{bindings, build_assert, ffi::c_void};
pub mod mem;
pub mod resource;
@@ -48,7 +48,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
}
}
-/// IO-mapped memory, starting at the base address @addr and spanning @maxlen bytes.
+/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request etc.
@@ -61,7 +61,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// # Examples
///
/// ```no_run
-/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
@@ -75,19 +75,19 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// unsafe fn new(paddr: usize) -> Result<Self>{
/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
/// // valid for `ioremap`.
-/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
/// if addr.is_null() {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
/// fn drop(&mut self) {
/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
-/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
/// }
/// }
///
@@ -124,7 +124,7 @@ macro_rules! define_read {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(addr as _) }
+ unsafe { bindings::$c_fn(addr as *const c_void) }
}
/// Read IO data from a given offset.
@@ -136,7 +136,7 @@ macro_rules! define_read {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- Ok(unsafe { bindings::$c_fn(addr as _) })
+ Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
}
};
}
@@ -153,7 +153,7 @@ macro_rules! define_write {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as _, ) }
+ unsafe { bindings::$c_fn(value, addr as *mut c_void) }
}
/// Write IO data from a given offset.
@@ -165,7 +165,7 @@ macro_rules! define_write {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as _) }
+ unsafe { bindings::$c_fn(value, addr as *mut c_void) }
Ok(())
}
};
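
With the casts spelled out, the generated accessors keep their shape. A usage sketch against the `IoMem` example above (`read32`/`try_read32` as generated by `define_read!`):

    // `io` derefs to `Io<16>` as in the example above.
    let id = io.read32(0x0);           // offset checked at compile time
    let status = io.try_read32(0x4)?;  // offset checked at runtime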
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index b9e65905e121..41efd87595d6 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -7,7 +7,10 @@
//! Reference: <https://docs.kernel.org/dev-tools/kunit/index.html>
use crate::prelude::*;
-use core::{ffi::c_void, fmt};
+use core::fmt;
+
+#[cfg(CONFIG_PRINTK)]
+use crate::c_str;
/// Prints a KUnit error-level message.
///
@@ -19,8 +22,8 @@ pub fn err(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c"\x013%pA".as_ptr() as _,
- &args as *const _ as *const c_void,
+ c_str!("\x013%pA").as_char_ptr(),
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -35,8 +38,8 @@ pub fn info(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c"\x016%pA".as_ptr() as _,
- &args as *const _ as *const c_void,
+ c_str!("\x016%pA").as_char_ptr(),
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index c2d1b9375205..ed53169e795c 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -62,8 +62,10 @@ pub mod acpi;
pub mod alloc;
#[cfg(CONFIG_AUXILIARY_BUS)]
pub mod auxiliary;
+pub mod bits;
#[cfg(CONFIG_BLOCK)]
pub mod block;
+pub mod bug;
#[doc(hidden)]
pub mod build_assert;
pub mod clk;
@@ -85,6 +87,7 @@ pub mod error;
pub mod faux;
#[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
pub mod firmware;
+pub mod fmt;
pub mod fs;
pub mod init;
pub mod io;
@@ -213,6 +216,13 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
/// Produces a pointer to an object from a pointer to one of its fields.
///
+/// If you encounter a type mismatch due to the [`Opaque`] type, then use [`Opaque::cast_into`] or
+/// [`Opaque::cast_from`] to resolve the mismatch.
+///
+/// [`Opaque`]: crate::types::Opaque
+/// [`Opaque::cast_into`]: crate::types::Opaque::cast_into
+/// [`Opaque::cast_from`]: crate::types::Opaque::cast_from
+///
/// # Safety
///
/// The pointer passed to this macro, and the pointer returned by this macro, must both be in
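
A short sketch of the pattern this new paragraph describes, using a hypothetical `bindings::foo` embedded behind `Opaque` (names are illustrative only):

    use kernel::types::Opaque;

    struct Driver {
        foo: Opaque<bindings::foo>, // hypothetical C type, for illustration
    }

    /// # Safety
    ///
    /// `ptr` must point at the `foo` field of a live `Driver`.
    unsafe fn driver_from_foo(ptr: *const bindings::foo) -> *const Driver {
        // `Opaque::cast_from` turns `*const foo` into `*const Opaque<foo>`,
        // matching the field type so that `container_of!` type-checks.
        unsafe { kernel::container_of!(Opaque::cast_from(ptr), Driver, foo) }
    }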
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
index c391c30b80f8..44e5219cfcbc 100644
--- a/rust/kernel/list.rs
+++ b/rust/kernel/list.rs
@@ -57,14 +57,11 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// }
/// }
///
-/// impl_has_list_links! {
-/// impl HasListLinks<0> for BasicItem { self.links }
-/// }
/// impl_list_arc_safe! {
/// impl ListArcSafe<0> for BasicItem { untracked; }
/// }
/// impl_list_item! {
-/// impl ListItem<0> for BasicItem { using ListLinks; }
+/// impl ListItem<0> for BasicItem { using ListLinks { self.links }; }
/// }
///
/// // Create a new empty list.
@@ -82,9 +79,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // [15, 10, 30]
/// {
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 15);
-/// assert_eq!(iter.next().unwrap().value, 10);
-/// assert_eq!(iter.next().unwrap().value, 30);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 30);
/// assert!(iter.next().is_none());
///
/// // Verify the length of the list.
@@ -93,9 +90,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
///
/// // Pop the items from the list using `pop_back()` and verify the content.
/// {
-/// assert_eq!(list.pop_back().unwrap().value, 30);
-/// assert_eq!(list.pop_back().unwrap().value, 10);
-/// assert_eq!(list.pop_back().unwrap().value, 15);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(list.pop_back().ok_or(EINVAL)?.value, 15);
/// }
///
/// // Insert 3 elements using `push_front()`.
@@ -107,9 +104,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // [30, 10, 15]
/// {
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 30);
-/// assert_eq!(iter.next().unwrap().value, 10);
-/// assert_eq!(iter.next().unwrap().value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
/// assert!(iter.next().is_none());
///
/// // Verify the length of the list.
@@ -118,8 +115,8 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
///
/// // Pop the items from the list using `pop_front()` and verify the content.
/// {
-/// assert_eq!(list.pop_front().unwrap().value, 30);
-/// assert_eq!(list.pop_front().unwrap().value, 10);
+/// assert_eq!(list.pop_front().ok_or(EINVAL)?.value, 30);
+/// assert_eq!(list.pop_front().ok_or(EINVAL)?.value, 10);
/// }
///
/// // Push `list2` to `list` through `push_all_back()`.
@@ -135,9 +132,9 @@ pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
/// // list: [15, 25, 35]
/// // list2: []
/// let mut iter = list.iter();
-/// assert_eq!(iter.next().unwrap().value, 15);
-/// assert_eq!(iter.next().unwrap().value, 25);
-/// assert_eq!(iter.next().unwrap().value, 35);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 15);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 25);
+/// assert_eq!(iter.next().ok_or(EINVAL)?.value, 35);
/// assert!(iter.next().is_none());
/// assert!(list2.is_empty());
/// }
@@ -284,7 +281,7 @@ impl<const ID: u64> ListLinks<ID> {
#[inline]
unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
// SAFETY: The caller promises that the pointer is valid.
- unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ unsafe { Opaque::cast_into(ptr::addr_of!((*me).inner)) }
}
/// # Safety
@@ -320,9 +317,6 @@ unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
- /// The offset from the [`ListLinks`] to the self pointer field.
- pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
-
/// Creates a new initializer for this type.
pub fn new() -> impl PinInit<Self> {
// INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
@@ -337,6 +331,16 @@ impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
self_ptr: Opaque::uninit(),
}
}
+
+ /// Returns a pointer to the self pointer.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ pub unsafe fn raw_get_self_ptr(me: *const Self) -> *const Opaque<*const T> {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { ptr::addr_of!((*me).self_ptr) }
+ }
}
impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
@@ -711,14 +715,11 @@ impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
/// }
/// }
///
-/// kernel::list::impl_has_list_links! {
-/// impl HasListLinks<0> for ListItem { self.links }
-/// }
/// kernel::list::impl_list_arc_safe! {
/// impl ListArcSafe<0> for ListItem { untracked; }
/// }
/// kernel::list::impl_list_item! {
-/// impl ListItem<0> for ListItem { using ListLinks; }
+/// impl ListItem<0> for ListItem { using ListLinks { self.links }; }
/// }
///
/// // Use a cursor to remove the first element with the given value.
@@ -809,11 +810,11 @@ impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
/// merge_sorted(&mut list, list2);
///
/// let mut items = list.into_iter();
-/// assert_eq!(items.next().unwrap().value, 10);
-/// assert_eq!(items.next().unwrap().value, 11);
-/// assert_eq!(items.next().unwrap().value, 12);
-/// assert_eq!(items.next().unwrap().value, 13);
-/// assert_eq!(items.next().unwrap().value, 14);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 10);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 11);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 12);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 13);
+/// assert_eq!(items.next().ok_or(EINVAL)?.value, 14);
/// assert!(items.next().is_none());
/// # Result::<(), Error>::Ok(())
/// ```
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
index a0438537cee1..202bc6f97c13 100644
--- a/rust/kernel/list/impl_list_item_mod.rs
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -4,60 +4,48 @@
//! Helpers for implementing list traits safely.
-use crate::list::ListLinks;
-
-/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+/// Declares that this type has a [`ListLinks<ID>`] field.
///
-/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// This trait is only used to help implement [`ListItem`] safely. If [`ListItem`] is implemented
/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
/// this trait.
///
/// # Safety
///
-/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+/// The methods on this trait must behave exactly as documented below.
///
-/// The behavior of `raw_get_list_links` must not be changed.
+/// [`ListLinks<ID>`]: crate::list::ListLinks
+/// [`ListItem`]: crate::list::ListItem
pub unsafe trait HasListLinks<const ID: u64 = 0> {
- /// The offset of the `ListLinks` field.
- const OFFSET: usize;
-
- /// Returns a pointer to the [`ListLinks<T, ID>`] field.
+ /// Returns a pointer to the [`ListLinks<ID>`] field.
///
/// # Safety
///
/// The provided pointer must point at a valid struct of type `Self`.
///
- /// [`ListLinks<T, ID>`]: ListLinks
- // We don't really need this method, but it's necessary for the implementation of
- // `impl_has_list_links!` to be correct.
- #[inline]
- unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
- // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
- // `OFFSET` constant is correct.
- unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
- }
+ /// [`ListLinks<ID>`]: crate::list::ListLinks
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut crate::list::ListLinks<ID>;
}
/// Implements the [`HasListLinks`] trait for the given type.
#[macro_export]
macro_rules! impl_has_list_links {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasListLinks$(<$id:tt>)?
- for $self:ident $(<$($selfarg:ty),*>)?
+ for $self:ty
{ self$(.$field:ident)* }
)*) => {$(
// SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
// right type.
- //
- // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
- // equivalent to the pointer offset operation in the trait definition.
- unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
- $self $(<$($selfarg),*>)?
- {
- const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
-
+ unsafe impl$(<$($generics)*>)? $crate::list::HasListLinks$(<$id>)? for $self {
#[inline]
unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // Statically ensure that `$(.$field)*` doesn't follow any pointers.
+ //
+ // Cannot be `const` because `$self` may contain generics and E0401 says constants
+ // "can't use {`Self`,generic parameters} from outer item".
+ if false { let _: usize = ::core::mem::offset_of!(Self, $($field).*); }
+
// SAFETY: The caller promises that the pointer is not dangling. We know that this
// expression doesn't follow any pointers, as the `offset_of!` invocation above
// would otherwise not compile.
@@ -68,12 +56,16 @@ macro_rules! impl_has_list_links {
}
pub use impl_has_list_links;
-/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+/// Declares that the [`ListLinks<ID>`] field in this struct is inside a
+/// [`ListLinksSelfPtr<T, ID>`].
///
/// # Safety
///
-/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
-/// inside a `ListLinksSelfPtr<T, ID>`.
+/// The [`ListLinks<ID>`] field of this struct at [`HasListLinks<ID>::raw_get_list_links`] must be
+/// inside a [`ListLinksSelfPtr<T, ID>`].
+///
+/// [`ListLinks<ID>`]: crate::list::ListLinks
+/// [`ListLinksSelfPtr<T, ID>`]: crate::list::ListLinksSelfPtr
pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
where
Self: HasListLinks<ID>,
@@ -83,27 +75,21 @@ where
/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
#[macro_export]
macro_rules! impl_has_list_links_self_ptr {
- ($(impl$({$($implarg:tt)*})?
+ ($(impl$({$($generics:tt)*})?
HasSelfPtr<$item_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ty),*>)?
- { self.$field:ident }
+ for $self:ty
+ { self$(.$field:ident)* }
)*) => {$(
// SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
// right type.
- unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
- $self $(<$($selfarg),*>)?
- {}
-
- unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
- $self $(<$($selfarg),*>)?
- {
- const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+ unsafe impl$(<$($generics)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for $self {}
+ unsafe impl$(<$($generics)*>)? $crate::list::HasListLinks$(<$id>)? for $self {
#[inline]
unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
// SAFETY: The caller promises that the pointer is not dangling.
let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
- unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) };
ptr.cast()
}
}
@@ -117,15 +103,95 @@ pub use impl_has_list_links_self_ptr;
/// implement that trait.
///
/// [`ListItem`]: crate::list::ListItem
+///
+/// # Examples
+///
+/// ```
+/// #[pin_data]
+/// struct SimpleListItem {
+/// value: u32,
+/// #[pin]
+/// links: kernel::list::ListLinks,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl ListArcSafe<0> for SimpleListItem { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl ListItem<0> for SimpleListItem { using ListLinks { self.links }; }
+/// }
+///
+/// struct ListLinksHolder {
+/// inner: kernel::list::ListLinks,
+/// }
+///
+/// #[pin_data]
+/// struct ComplexListItem<T, U> {
+/// value: Result<T, U>,
+/// #[pin]
+/// links: ListLinksHolder,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl{T, U} ListArcSafe<0> for ComplexListItem<T, U> { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl{T, U} ListItem<0> for ComplexListItem<T, U> { using ListLinks { self.links.inner }; }
+/// }
+/// ```
+///
+/// ```
+/// #[pin_data]
+/// struct SimpleListItem {
+/// value: u32,
+/// #[pin]
+/// links: kernel::list::ListLinksSelfPtr<SimpleListItem>,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl ListArcSafe<0> for SimpleListItem { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl ListItem<0> for SimpleListItem { using ListLinksSelfPtr { self.links }; }
+/// }
+///
+/// struct ListLinksSelfPtrHolder<T, U> {
+/// inner: kernel::list::ListLinksSelfPtr<ComplexListItem<T, U>>,
+/// }
+///
+/// #[pin_data]
+/// struct ComplexListItem<T, U> {
+/// value: Result<T, U>,
+/// #[pin]
+/// links: ListLinksSelfPtrHolder<T, U>,
+/// }
+///
+/// kernel::list::impl_list_arc_safe! {
+/// impl{T, U} ListArcSafe<0> for ComplexListItem<T, U> { untracked; }
+/// }
+///
+/// kernel::list::impl_list_item! {
+/// impl{T, U} ListItem<0> for ComplexListItem<T, U> {
+/// using ListLinksSelfPtr { self.links.inner };
+/// }
+/// }
+/// ```
#[macro_export]
macro_rules! impl_list_item {
(
- $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
- using ListLinks;
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $self:ty {
+ using ListLinks { self$(.$field:ident)* };
})*
) => {$(
+ $crate::list::impl_has_list_links! {
+ impl$({$($generics)*})? HasListLinks<$num> for $self { self$(.$field)* }
+ }
+
// SAFETY: See GUARANTEES comment on each method.
- unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $self {
// GUARANTEES:
// * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
// is implemented in terms of `view_links`.
@@ -139,20 +205,19 @@ macro_rules! impl_list_item {
}
// GUARANTEES:
- // * `me` originates from the most recent call to `prepare_to_insert`, which just added
- // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
- // `offset` from `me` so it returns the pointer originally passed to
- // `prepare_to_insert`.
+ // * `me` originates from the most recent call to `prepare_to_insert`, which calls
+ // `raw_get_list_links`, which is implemented using `addr_of_mut!((*ptr)$(.$field)*)`.
+ // This method uses `container_of` to perform the inverse operation, so it returns the
+ // pointer originally passed to `prepare_to_insert`.
// * The pointer remains valid until the next call to `post_remove` because the caller
// of the most recent call to `prepare_to_insert` promised to retain ownership of the
// `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
// be destroyed while a `ListArc` reference exists.
unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
// SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
- // points at the field at offset `offset` in a value of type `Self`. Thus,
- // subtracting `offset` from `me` is still in-bounds of the allocation.
- unsafe { (me as *const u8).sub(offset) as *const Self }
+ // points at the field `$field` in a value of type `Self`. Thus, reversing that
+ // operation is still in-bounds of the allocation.
+ $crate::container_of!(me, Self, $($field).*)
}
// GUARANTEES:
@@ -169,27 +234,30 @@ macro_rules! impl_list_item {
}
// GUARANTEES:
- // * `me` originates from the most recent call to `prepare_to_insert`, which just added
- // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
- // `offset` from `me` so it returns the pointer originally passed to
- // `prepare_to_insert`.
+ // * `me` originates from the most recent call to `prepare_to_insert`, which calls
+ // `raw_get_list_links`, which is implemented using `addr_of_mut!((*ptr)$(.$field)*)`.
+ // This method uses `container_of` to perform the inverse operation, so it returns the
+ // pointer originally passed to `prepare_to_insert`.
unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
// SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
- // points at the field at offset `offset` in a value of type `Self`. Thus,
- // subtracting `offset` from `me` is still in-bounds of the allocation.
- unsafe { (me as *const u8).sub(offset) as *const Self }
+ // points at the field `$field` in a value of type `Self`. Thus, reversing that
+ // operation is still in-bounds of the allocation.
+ $crate::container_of!(me, Self, $($field).*)
}
}
)*};
(
- $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
- using ListLinksSelfPtr;
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $self:ty {
+ using ListLinksSelfPtr { self$(.$field:ident)* };
})*
) => {$(
+ $crate::list::impl_has_list_links_self_ptr! {
+ impl$({$($generics)*})? HasSelfPtr<$self> for $self { self$(.$field)* }
+ }
+
// SAFETY: See GUARANTEES comment on each method.
- unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $self {
// GUARANTEES:
// This implementation of `ListItem` will not give out exclusive access to the same
// `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
@@ -202,14 +270,16 @@ macro_rules! impl_list_item {
// SAFETY: The caller promises that `me` points at a valid value of type `Self`.
let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
- let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
- // Goes via the offset as the field is private.
- //
- // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
- // the pointer stays in bounds of the allocation.
- let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
- as *const $crate::types::Opaque<*const Self>;
- let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+ let container = $crate::container_of!(
+ links_field, $crate::list::ListLinksSelfPtr<Self, $num>, inner
+ );
+
+ // SAFETY: By the same reasoning as above, `container` is a valid pointer.
+ let self_ptr = unsafe {
+ $crate::list::ListLinksSelfPtr::raw_get_self_ptr(container)
+ };
+
+ let cell_inner = $crate::types::Opaque::cast_into(self_ptr);
// SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
// `post_remove`, or `view_value`. By the safety requirements of those methods,
@@ -228,7 +298,9 @@ macro_rules! impl_list_item {
// this value is not in a list.
unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
// SAFETY: The caller promises that `me` points at a valid value of type `Self`.
- unsafe { <Self as HasListLinks<$num>>::raw_get_list_links(me.cast_mut()) }
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
}
// This function is also used as the implementation of `post_remove`, so the caller
@@ -247,12 +319,17 @@ macro_rules! impl_list_item {
// `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
// be destroyed while a `ListArc` reference exists.
unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
- let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
- // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
- // the pointer stays in bounds of the allocation.
- let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
- as *const ::core::cell::UnsafeCell<*const Self>;
- let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ let container = $crate::container_of!(
+ links_field, $crate::list::ListLinksSelfPtr<Self, $num>, inner
+ );
+
+ // SAFETY: By the same reasoning as above, `container` is a valid pointer.
+ let self_ptr = unsafe {
+ $crate::list::ListLinksSelfPtr::raw_get_self_ptr(container)
+ };
+
+ let cell_inner = $crate::types::Opaque::cast_into(self_ptr);
+
// SAFETY: This is not a data race, because the only function that writes to this
// value is `prepare_to_insert`, but by the safety requirements the
// `prepare_to_insert` method may not be called in parallel with `view_value` or
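The reworked macros trade the removed `OFFSET` constants for a field projection (`addr_of_mut!`) and its inverse (`container_of!`). A minimal plain-Rust sketch of that round trip, with hypothetical `Item`/`Links` types standing in for the list machinery:

```
use core::mem::offset_of;
use core::ptr::addr_of_mut;

struct Links(usize);
struct Item {
    value: u32,
    links: Links,
}

/// Projects `item` to its `links` field, then recovers `item` again.
fn roundtrip(item: *mut Item) -> *const Item {
    // Project outer -> field without creating an intermediate reference.
    // SAFETY: assume the caller passes a pointer to a valid `Item`.
    let links: *mut Links = unsafe { addr_of_mut!((*item).links) };
    // Invert the projection by subtracting the field offset, which is what
    // `container_of!` does under the hood.
    // SAFETY: `links` points into the same `Item` allocation.
    unsafe { links.cast::<u8>().sub(offset_of!(Item, links)).cast::<Item>() }
}
```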
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index a1eb5737e3cb..6373fe183b27 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -33,7 +33,7 @@ impl MiscDeviceOptions {
pub const fn into_raw<T: MiscDevice>(self) -> bindings::miscdevice {
// SAFETY: All zeros is valid for this C type.
let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
- result.minor = bindings::MISC_DYNAMIC_MINOR as _;
+ result.minor = bindings::MISC_DYNAMIC_MINOR as ffi::c_int;
result.name = self.name.as_char_ptr();
result.fops = MiscdeviceVTable::<T>::build();
result
@@ -222,7 +222,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
// type.
//
// SAFETY: The open call of a file can access the private data.
- unsafe { (*raw_file).private_data = ptr.into_foreign().cast() };
+ unsafe { (*raw_file).private_data = ptr.into_foreign() };
0
}
@@ -233,7 +233,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn release(_inode: *mut bindings::inode, file: *mut bindings::file) -> c_int {
// SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: The release call of a file owns the private data.
let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
@@ -277,7 +277,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
// SAFETY: The ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
@@ -302,7 +302,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
arg: c_ulong,
) -> c_long {
// SAFETY: The compat ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
@@ -323,7 +323,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// - `seq_file` must be a valid `struct seq_file` that we can write to.
unsafe extern "C" fn show_fdinfo(seq_file: *mut bindings::seq_file, file: *mut bindings::file) {
// SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data }.cast();
+ let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY:
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index 31803674aecc..6086ca981b06 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -392,80 +392,80 @@ pub mod flags {
use crate::bindings;
/// No flags are set.
- pub const NONE: vm_flags_t = bindings::VM_NONE as _;
+ pub const NONE: vm_flags_t = bindings::VM_NONE as vm_flags_t;
/// Mapping allows reads.
- pub const READ: vm_flags_t = bindings::VM_READ as _;
+ pub const READ: vm_flags_t = bindings::VM_READ as vm_flags_t;
/// Mapping allows writes.
- pub const WRITE: vm_flags_t = bindings::VM_WRITE as _;
+ pub const WRITE: vm_flags_t = bindings::VM_WRITE as vm_flags_t;
/// Mapping allows execution.
- pub const EXEC: vm_flags_t = bindings::VM_EXEC as _;
+ pub const EXEC: vm_flags_t = bindings::VM_EXEC as vm_flags_t;
/// Mapping is shared.
- pub const SHARED: vm_flags_t = bindings::VM_SHARED as _;
+ pub const SHARED: vm_flags_t = bindings::VM_SHARED as vm_flags_t;
/// Mapping may be updated to allow reads.
- pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as _;
+ pub const MAYREAD: vm_flags_t = bindings::VM_MAYREAD as vm_flags_t;
/// Mapping may be updated to allow writes.
- pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as _;
+ pub const MAYWRITE: vm_flags_t = bindings::VM_MAYWRITE as vm_flags_t;
/// Mapping may be updated to allow execution.
- pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as _;
+ pub const MAYEXEC: vm_flags_t = bindings::VM_MAYEXEC as vm_flags_t;
/// Mapping may be updated to be shared.
- pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as _;
+ pub const MAYSHARE: vm_flags_t = bindings::VM_MAYSHARE as vm_flags_t;
/// Page-ranges managed without `struct page`, just pure PFN.
- pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as _;
+ pub const PFNMAP: vm_flags_t = bindings::VM_PFNMAP as vm_flags_t;
/// Memory mapped I/O or similar.
- pub const IO: vm_flags_t = bindings::VM_IO as _;
+ pub const IO: vm_flags_t = bindings::VM_IO as vm_flags_t;
/// Do not copy this vma on fork.
- pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as _;
+ pub const DONTCOPY: vm_flags_t = bindings::VM_DONTCOPY as vm_flags_t;
/// Cannot expand with mremap().
- pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as _;
+ pub const DONTEXPAND: vm_flags_t = bindings::VM_DONTEXPAND as vm_flags_t;
/// Lock the pages covered when they are faulted in.
- pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as _;
+ pub const LOCKONFAULT: vm_flags_t = bindings::VM_LOCKONFAULT as vm_flags_t;
/// Is a VM accounted object.
- pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as _;
+ pub const ACCOUNT: vm_flags_t = bindings::VM_ACCOUNT as vm_flags_t;
/// Should the VM suppress accounting.
- pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as _;
+ pub const NORESERVE: vm_flags_t = bindings::VM_NORESERVE as vm_flags_t;
/// Huge TLB Page VM.
- pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as _;
+ pub const HUGETLB: vm_flags_t = bindings::VM_HUGETLB as vm_flags_t;
/// Synchronous page faults. (DAX-specific)
- pub const SYNC: vm_flags_t = bindings::VM_SYNC as _;
+ pub const SYNC: vm_flags_t = bindings::VM_SYNC as vm_flags_t;
/// Architecture-specific flag.
- pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as _;
+ pub const ARCH_1: vm_flags_t = bindings::VM_ARCH_1 as vm_flags_t;
/// Wipe VMA contents in child on fork.
- pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as _;
+ pub const WIPEONFORK: vm_flags_t = bindings::VM_WIPEONFORK as vm_flags_t;
/// Do not include in the core dump.
- pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as _;
+ pub const DONTDUMP: vm_flags_t = bindings::VM_DONTDUMP as vm_flags_t;
/// Not soft dirty clean area.
- pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as _;
+ pub const SOFTDIRTY: vm_flags_t = bindings::VM_SOFTDIRTY as vm_flags_t;
/// Can contain `struct page` and pure PFN pages.
- pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as _;
+ pub const MIXEDMAP: vm_flags_t = bindings::VM_MIXEDMAP as vm_flags_t;
/// MADV_HUGEPAGE marked this vma.
- pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as _;
+ pub const HUGEPAGE: vm_flags_t = bindings::VM_HUGEPAGE as vm_flags_t;
/// MADV_NOHUGEPAGE marked this vma.
- pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as _;
+ pub const NOHUGEPAGE: vm_flags_t = bindings::VM_NOHUGEPAGE as vm_flags_t;
/// KSM may merge identical pages.
- pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as _;
+ pub const MERGEABLE: vm_flags_t = bindings::VM_MERGEABLE as vm_flags_t;
}
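Since `vm_flags_t` is a plain integer type, these constants combine with bitwise OR. A small usage sketch, assuming `vm_flags_t` is exported next to the `flags` module as the constant definitions suggest; the combination is arbitrary:

```
use kernel::mm::virt::{flags, vm_flags_t};

// Purely illustrative: a private, readable and writable mapping.
let vm_flags: vm_flags_t = flags::READ | flags::WRITE | flags::MAYREAD | flags::MAYWRITE;
assert_eq!(vm_flags & flags::SHARED, flags::NONE);
```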
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 602609027aa6..7de5cc7a0eee 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -142,7 +142,7 @@ impl Device {
// SAFETY: The struct invariant ensures that we may access
// this field without additional synchronization.
let bit_field = unsafe { &(*self.0.get())._bitfield_1 };
- bit_field.get(13, 1) == bindings::AUTONEG_ENABLE as u64
+ bit_field.get(13, 1) == u64::from(bindings::AUTONEG_ENABLE)
}
/// Gets the current auto-negotiation state.
@@ -419,7 +419,7 @@ impl<T: Driver> Adapter<T> {
// where we hold `phy_device->lock`, so the accessors on
// `Device` are okay to call.
let dev = unsafe { Device::from_raw(phydev) };
- T::match_phy_device(dev) as i32
+ T::match_phy_device(dev).into()
}
/// # Safety
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
index 0888469bddb7..b76b35265df2 100644
--- a/rust/kernel/of.rs
+++ b/rust/kernel/of.rs
@@ -27,7 +27,7 @@ unsafe impl RawDeviceIdIndex for DeviceId {
const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data);
fn index(&self) -> usize {
- self.0.data as _
+ self.0.data as usize
}
}
@@ -39,10 +39,10 @@ impl DeviceId {
// SAFETY: FFI type is valid to be zero-initialized.
let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() };
- // TODO: Use `clone_from_slice` once the corresponding types do match.
+ // TODO: Use `copy_from_slice` once stabilized for `const`.
let mut i = 0;
while i < src.len() {
- of.compatible[i] = src[i] as _;
+ of.compatible[i] = src[i];
i += 1;
}
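The manual `while` loop above is the stock workaround while `copy_from_slice` remains unusable in `const` contexts; the same pattern in a standalone, plain-Rust sketch (`copy_prefix` is a made-up helper):

```
// Element-wise copy that also works in `const fn` (plain Rust).
const fn copy_prefix<const N: usize>(src: &[u8]) -> [u8; N] {
    let mut dst = [0u8; N];
    let mut i = 0;
    while i < src.len() && i < N {
        dst[i] = src[i];
        i += 1;
    }
    dst
}

const COMPAT: [u8; 8] = copy_prefix(b"vendor");
```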
diff --git a/rust/kernel/opp.rs b/rust/kernel/opp.rs
index 846583da9a2f..08126035d2c6 100644
--- a/rust/kernel/opp.rs
+++ b/rust/kernel/opp.rs
@@ -92,7 +92,7 @@ fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
let mut list = KVec::with_capacity(names.len() + 1, GFP_KERNEL)?;
for name in names.iter() {
- list.push(name.as_ptr() as _, GFP_KERNEL)?;
+ list.push(name.as_ptr().cast(), GFP_KERNEL)?;
}
list.push(ptr::null(), GFP_KERNEL)?;
@@ -103,7 +103,7 @@ fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
///
/// Represents voltage in microvolts, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::opp::MicroVolt;
@@ -128,7 +128,7 @@ impl From<MicroVolt> for c_ulong {
///
/// Represents power in microwatts, wrapping a [`c_ulong`] value.
///
-/// ## Examples
+/// # Examples
///
/// ```
/// use kernel::opp::MicroWatt;
@@ -153,7 +153,7 @@ impl From<MicroWatt> for c_ulong {
///
/// The associated [`OPP`] is automatically removed when the [`Token`] is dropped.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create an [`OPP`] dynamically.
///
@@ -202,7 +202,7 @@ impl Drop for Token {
/// Rust abstraction for the C `struct dev_pm_opp_data`, used to define operating performance
/// points (OPPs) dynamically.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to create an [`OPP`] with [`Data`].
///
@@ -254,7 +254,7 @@ impl Data {
/// [`OPP`] search options.
///
-/// ## Examples
+/// # Examples
///
/// Defines how to search for an [`OPP`] in a [`Table`] relative to a frequency.
///
@@ -326,7 +326,7 @@ impl Drop for ConfigToken {
///
/// Rust abstraction for the C `struct dev_pm_opp_config`.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to set OPP property-name configuration for a [`Device`].
///
@@ -345,7 +345,7 @@ impl Drop for ConfigToken {
/// impl ConfigOps for Driver {}
///
/// fn configure(dev: &ARef<Device>) -> Result<ConfigToken> {
-/// let name = CString::try_from_fmt(fmt!("{}", "slow"))?;
+/// let name = CString::try_from_fmt(fmt!("slow"))?;
///
/// // The OPP configuration is cleared once the [`ConfigToken`] goes out of scope.
/// Config::<Driver>::new()
@@ -569,7 +569,7 @@ impl<T: ConfigOps + Default> Config<T> {
///
/// Instances of this type are reference-counted.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to get OPP [`Table`] for a [`Cpumask`] and set its
/// frequency.
@@ -1011,7 +1011,7 @@ impl Drop for Table {
///
/// A reference to the [`OPP`], &[`OPP`], isn't refcounted by the Rust code.
///
-/// ## Examples
+/// # Examples
///
/// The following example demonstrates how to get [`OPP`] corresponding to a frequency value and
/// configure the device with it.
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 44a2f3d2884a..887ee611b553 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -98,7 +98,7 @@ impl<T: Driver + 'static> Adapter<T> {
/// Declares a kernel module that exposes a single PCI driver.
///
-/// # Example
+/// # Examples
///
///```ignore
/// kernel::module_pci_driver! {
@@ -170,7 +170,7 @@ unsafe impl RawDeviceIdIndex for DeviceId {
const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data);
fn index(&self) -> usize {
- self.0.driver_data as _
+ self.0.driver_data
}
}
@@ -193,7 +193,7 @@ macro_rules! pci_device_table {
/// The PCI driver trait.
///
-/// # Example
+/// # Examples
///
///```
/// # use kernel::{bindings, device::Core, pci};
@@ -205,7 +205,10 @@ macro_rules! pci_device_table {
/// MODULE_PCI_TABLE,
/// <MyDriver as pci::Driver>::IdInfo,
/// [
-/// (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ())
+/// (
+/// pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as u32),
+/// (),
+/// )
/// ]
/// );
///
@@ -344,7 +347,7 @@ impl<const SIZE: usize> Bar<SIZE> {
// `ioptr` is valid by the safety requirements.
// `num` is valid by the safety requirements.
unsafe {
- bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut kernel::ffi::c_void);
bindings::pci_release_region(pdev.as_raw(), num);
}
}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index b4d3087aff52..8f028c76f9fa 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -132,7 +132,7 @@ macro_rules! module_platform_driver {
///
/// Drivers must implement this trait in order to get a platform driver registered.
///
-/// # Example
+/// # Examples
///
///```
/// # use kernel::{acpi, bindings, c_str, device::Core, of, platform};
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index 2f30a398dddd..25fe97aafd02 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -31,9 +31,9 @@ pub use super::{build_assert, build_error};
// `super::std_vendor` is hidden, which makes the macro inline for some reason.
#[doc(no_inline)]
pub use super::dbg;
-pub use super::fmt;
pub use super::{dev_alert, dev_crit, dev_dbg, dev_emerg, dev_err, dev_info, dev_notice, dev_warn};
pub use super::{pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
+pub use core::format_args as fmt;
pub use super::{try_init, try_pin_init};
@@ -46,3 +46,5 @@ pub use super::{str::CStr, ThisModule};
pub use super::init::InPlaceInit;
pub use super::current;
+
+pub use super::uaccess::UserPtr;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 9783d960a97a..2d743d78d220 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -8,10 +8,10 @@
use crate::{
ffi::{c_char, c_void},
+ fmt,
prelude::*,
str::RawFormatter,
};
-use core::fmt;
// Called from `vsprintf` with format specifier `%pA`.
#[expect(clippy::missing_safety_doc)]
@@ -25,7 +25,7 @@ unsafe extern "C" fn rust_fmt_argument(
// SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
// SAFETY: TODO.
- let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
+ let _ = w.write_fmt(unsafe { *ptr.cast::<fmt::Arguments<'_>>() });
w.pos().cast()
}
@@ -109,7 +109,7 @@ pub unsafe fn call_printk(
bindings::_printk(
format_string.as_ptr(),
module_name.as_ptr(),
- &args as *const _ as *const c_void,
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -129,7 +129,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
unsafe {
bindings::_printk(
format_strings::CONT.as_ptr(),
- &args as *const _ as *const c_void,
+ core::ptr::from_ref(&args).cast::<c_void>(),
);
}
}
@@ -149,7 +149,7 @@ macro_rules! print_macro (
// takes borrows on the arguments, but does not extend the scope of temporaries.
// Therefore, a `match` expression is used to keep them around, since
// the scrutinee is kept until the end of the `match`.
- match format_args!($($arg)+) {
+ match $crate::prelude::fmt!($($arg)+) {
// SAFETY: This hidden macro should only be called by the documented
// printing macros which ensure the format string is one of the fixed
// ones. All `__LOG_PREFIX`s are null-terminated as they are generated
@@ -168,7 +168,7 @@ macro_rules! print_macro (
// The `CONT` case.
($format_string:path, true, $($arg:tt)+) => (
$crate::print::call_printk_cont(
- format_args!($($arg)+),
+ $crate::prelude::fmt!($($arg)+),
);
);
);
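The `match` wrapper mentioned in the comment above is the usual trick for keeping the temporaries borrowed by `format_args!` alive; a plain-Rust illustration outside the kernel:

```
fn log_sum(a: i32, b: i32) {
    // `format_args!` borrows temporaries, so binding its result with `let`
    // would not compile; a `match` scrutinee, however, lives until the end
    // of the `match` body.
    match format_args!("sum = {}", a + b) {
        args => println!("{args}"),
    }
}
```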
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
index 8d978c896747..b8fe6be6fcc4 100644
--- a/rust/kernel/rbtree.rs
+++ b/rust/kernel/rbtree.rs
@@ -191,6 +191,12 @@ impl<K, V> RBTree<K, V> {
}
}
+ /// Returns true if this tree is empty.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.root.rb_node.is_null()
+ }
+
/// Returns an iterator over the tree nodes, sorted by key.
pub fn iter(&self) -> Iter<'_, K, V> {
Iter {
@@ -769,23 +775,14 @@ impl<'a, K, V> Cursor<'a, K, V> {
// the tree cannot change. By the tree invariant, all nodes are valid.
unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
- let current = match (prev, next) {
- (_, Some(next)) => next,
- (Some(prev), None) => prev,
- (None, None) => {
- return (None, node);
- }
- };
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ let cursor = next.or(prev).map(|current| Self {
+ current,
+ tree: self.tree,
+ });
- (
- // INVARIANT:
- // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
- Some(Self {
- current,
- tree: self.tree,
- }),
- node,
- )
+ (cursor, node)
}
/// Remove the previous node, returning it if it exists.
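The rewritten cursor logic is just `Option` plumbing: prefer `next`, fall back to `prev`, and construct a cursor only if either exists. The same shape in isolation, with toy values:

```
let next: Option<u32> = None;
let prev: Option<u32> = Some(7);
// Prefer `next`, fall back to `prev`, and transform only if either exists.
let current = next.or(prev).map(|v| v + 1);
assert_eq!(current, Some(8));
```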
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
index 46768b374656..0f4ae673256d 100644
--- a/rust/kernel/revocable.rs
+++ b/rust/kernel/revocable.rs
@@ -233,6 +233,10 @@ impl<T> PinnedDrop for Revocable<T> {
///
/// The RCU read-side lock is held while the guard is alive.
pub struct RevocableGuard<'a, T> {
+ // This can't use the `&'a T` type because references that appear in function arguments must
+ // not become dangling during the execution of the function, which can happen if the
+ // `RevocableGuard` is passed as a function argument and then dropped during execution of the
+ // function.
data_ref: *const T,
_rcu_guard: rcu::Guard,
_p: PhantomData<&'a ()>,
diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
index 7a9403eb6e5b..8f199b1a3bb1 100644
--- a/rust/kernel/seq_file.rs
+++ b/rust/kernel/seq_file.rs
@@ -37,7 +37,7 @@ impl SeqFile {
bindings::seq_printf(
self.inner.get(),
c_str!("%pA").as_char_ptr(),
- &args as *const _ as *const crate::ffi::c_void,
+ core::ptr::from_ref(&args).cast::<crate::ffi::c_void>(),
);
}
}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index a927db8e079c..6c892550c0ba 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -3,7 +3,7 @@
//! String representations.
use crate::alloc::{flags::*, AllocError, KVec};
-use core::fmt::{self, Write};
+use crate::fmt::{self, Write};
use core::ops::{self, Deref, DerefMut, Index};
use crate::prelude::*;
@@ -29,7 +29,7 @@ impl BStr {
#[inline]
pub const fn from_bytes(bytes: &[u8]) -> &Self {
// SAFETY: `BStr` is transparent to `[u8]`.
- unsafe { &*(bytes as *const [u8] as *const BStr) }
+ unsafe { &*(core::ptr::from_ref(bytes) as *const BStr) }
}
/// Strip a prefix from `self`. Delegates to [`slice::strip_prefix`].
@@ -54,14 +54,14 @@ impl fmt::Display for BStr {
/// Formats printable ASCII characters, escaping the rest.
///
/// ```
- /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// # use kernel::{prelude::fmt, b_str, str::{BStr, CString}};
/// let ascii = b_str!("Hello, BStr!");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
- /// assert_eq!(s.as_bytes(), "Hello, BStr!".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii}"))?;
+ /// assert_eq!(s.to_bytes(), "Hello, BStr!".as_bytes());
///
/// let non_ascii = b_str!("🦀");
- /// let s = CString::try_from_fmt(fmt!("{}", non_ascii))?;
- /// assert_eq!(s.as_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{non_ascii}"))?;
+ /// assert_eq!(s.to_bytes(), "\\xf0\\x9f\\xa6\\x80".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -85,15 +85,15 @@ impl fmt::Debug for BStr {
/// escaping the rest.
///
/// ```
- /// # use kernel::{fmt, b_str, str::{BStr, CString}};
+ /// # use kernel::{prelude::fmt, b_str, str::{BStr, CString}};
/// // Embedded double quotes are escaped.
/// let ascii = b_str!("Hello, \"BStr\"!");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
- /// assert_eq!(s.as_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii:?}"))?;
+ /// assert_eq!(s.to_bytes(), "\"Hello, \\\"BStr\\\"!\"".as_bytes());
///
/// let non_ascii = b_str!("😺");
- /// let s = CString::try_from_fmt(fmt!("{:?}", non_ascii))?;
- /// assert_eq!(s.as_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{non_ascii:?}"))?;
+ /// assert_eq!(s.to_bytes(), "\"\\xf0\\x9f\\x98\\xba\"".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -175,6 +175,15 @@ macro_rules! b_str {
}};
}
+/// Returns a C pointer to the string.
+// It is a free function rather than a method on an extension trait because:
+//
+// - error[E0379]: functions in trait impls cannot be declared const
+#[inline]
+pub const fn as_char_ptr_in_const_context(c_str: &CStr) -> *const c_char {
+ c_str.0.as_ptr()
+}
+
/// Possible errors when using conversion functions in [`CStr`].
#[derive(Debug, Clone, Copy)]
pub enum CStrConvertError {
@@ -232,12 +241,12 @@ impl CStr {
/// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr`
/// must not be mutated.
#[inline]
- pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {
+ pub unsafe fn from_char_ptr<'a>(ptr: *const c_char) -> &'a Self {
// SAFETY: The safety precondition guarantees `ptr` is a valid pointer
// to a `NUL`-terminated C string.
let len = unsafe { bindings::strlen(ptr) } + 1;
// SAFETY: Lifetime guaranteed by the safety precondition.
- let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len) };
+ let bytes = unsafe { core::slice::from_raw_parts(ptr.cast(), len) };
// SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
// As we have added 1 to `len`, the last byte is known to be `NUL`.
unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
@@ -290,27 +299,49 @@ impl CStr {
#[inline]
pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr {
// SAFETY: Properties of `bytes` guaranteed by the safety precondition.
- unsafe { &mut *(bytes as *mut [u8] as *mut CStr) }
+ unsafe { &mut *(core::ptr::from_mut(bytes) as *mut CStr) }
}
/// Returns a C pointer to the string.
+ ///
+ /// Using this function in a const context is deprecated in favor of
+ /// [`as_char_ptr_in_const_context`] in preparation for replacing `CStr` with `core::ffi::CStr`
+ /// which does not have this method.
#[inline]
- pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {
- self.0.as_ptr()
+ pub const fn as_char_ptr(&self) -> *const c_char {
+ as_char_ptr_in_const_context(self)
}
/// Convert the string to a byte slice without the trailing `NUL` byte.
#[inline]
- pub fn as_bytes(&self) -> &[u8] {
+ pub fn to_bytes(&self) -> &[u8] {
&self.0[..self.len()]
}
+ /// Convert the string to a byte slice without the trailing `NUL` byte.
+ ///
+ /// This function is deprecated in favor of [`Self::to_bytes`] in preparation for replacing
+ /// `CStr` with `core::ffi::CStr` which does not have this method.
+ #[inline]
+ pub fn as_bytes(&self) -> &[u8] {
+ self.to_bytes()
+ }
+
/// Convert the string to a byte slice containing the trailing `NUL` byte.
#[inline]
- pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ pub const fn to_bytes_with_nul(&self) -> &[u8] {
&self.0
}
+ /// Convert the string to a byte slice containing the trailing `NUL` byte.
+ ///
+ /// This function is deprecated in favor of [`Self::to_bytes_with_nul`] in preparation for
+ /// replacing `CStr` with `core::ffi::CStr` which does not have this method.
+ #[inline]
+ pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ self.to_bytes_with_nul()
+ }
+
/// Yields a [`&str`] slice if the [`CStr`] contains valid UTF-8.
///
/// If the contents of the [`CStr`] are valid UTF-8 data, this
@@ -429,20 +460,20 @@ impl fmt::Display for CStr {
///
/// ```
/// # use kernel::c_str;
- /// # use kernel::fmt;
+ /// # use kernel::prelude::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{}", penguin))?;
- /// assert_eq!(s.as_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{penguin}"))?;
+ /// assert_eq!(s.to_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
///
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{}", ascii))?;
- /// assert_eq!(s.as_bytes_with_nul(), "so \"cool\"\0".as_bytes());
+ /// let s = CString::try_from_fmt(fmt!("{ascii}"))?;
+ /// assert_eq!(s.to_bytes_with_nul(), "so \"cool\"\0".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- for &c in self.as_bytes() {
+ for &c in self.to_bytes() {
if (0x20..0x7f).contains(&c) {
// Printable character.
f.write_char(c as char)?;
@@ -459,16 +490,16 @@ impl fmt::Debug for CStr {
///
/// ```
/// # use kernel::c_str;
- /// # use kernel::fmt;
+ /// # use kernel::prelude::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
- /// let s = CString::try_from_fmt(fmt!("{:?}", penguin))?;
+ /// let s = CString::try_from_fmt(fmt!("{penguin:?}"))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"\\xf0\\x9f\\x90\\xa7\"\0".as_bytes());
///
/// // Embedded double quotes are escaped.
/// let ascii = c_str!("so \"cool\"");
- /// let s = CString::try_from_fmt(fmt!("{:?}", ascii))?;
+ /// let s = CString::try_from_fmt(fmt!("{ascii:?}"))?;
/// assert_eq!(s.as_bytes_with_nul(), "\"so \\\"cool\\\"\"\0".as_bytes());
/// # Ok::<(), kernel::error::Error>(())
/// ```
@@ -578,7 +609,7 @@ mod tests {
macro_rules! format {
($($f:tt)*) => ({
- CString::try_from_fmt(::kernel::fmt!($($f)*))?.to_str()?
+ CString::try_from_fmt(fmt!($($f)*))?.to_str()?
})
}
@@ -728,9 +759,9 @@ impl RawFormatter {
pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
// INVARIANT: The safety requirements guarantee the type invariants.
Self {
- beg: pos as _,
- pos: pos as _,
- end: end as _,
+ beg: pos as usize,
+ pos: pos as usize,
+ end: end as usize,
}
}
@@ -755,7 +786,7 @@ impl RawFormatter {
///
/// N.B. It may point to invalid memory.
pub(crate) fn pos(&self) -> *mut u8 {
- self.pos as _
+ self.pos as *mut u8
}
/// Returns the number of bytes written to the formatter.
@@ -840,14 +871,14 @@ impl fmt::Write for Formatter {
/// # Examples
///
/// ```
-/// use kernel::{str::CString, fmt};
+/// use kernel::{str::CString, prelude::fmt};
///
/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20))?;
-/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
+/// assert_eq!(s.to_bytes_with_nul(), "abc1020\0".as_bytes());
///
/// let tmp = "testing";
/// let s = CString::try_from_fmt(fmt!("{tmp}{}", 123))?;
-/// assert_eq!(s.as_bytes_with_nul(), "testing123\0".as_bytes());
+/// assert_eq!(s.to_bytes_with_nul(), "testing123\0".as_bytes());
///
/// // This fails because it has an embedded `NUL` byte.
/// let s = CString::try_from_fmt(fmt!("a\0b{}", 123));
@@ -917,7 +948,7 @@ impl<'a> TryFrom<&'a CStr> for CString {
fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {
let mut buf = KVec::new();
- buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?;
+ buf.extend_from_slice(cstr.to_bytes_with_nul(), GFP_KERNEL)?;
// INVARIANT: The `CStr` and `CString` types have the same invariants for
// the string data, and we copied it over without changes.
@@ -930,9 +961,3 @@ impl fmt::Debug for CString {
fmt::Debug::fmt(&**self, f)
}
}
-
-/// A convenience alias for [`core::format_args`].
-#[macro_export]
-macro_rules! fmt {
- ($($f:tt)*) => ( ::core::format_args!($($f)*) )
-}
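One intended use of the new free function is `const` items, where an extension-trait method could not be declared `const` (E0379). A hedged sketch of such a use; `DEMO_NAME` is hypothetical:

```
use kernel::c_str;
use kernel::str::as_char_ptr_in_const_context;

// A C string pointer computed in a const context.
const DEMO_NAME: *const kernel::ffi::c_char =
    as_char_ptr_in_const_context(c_str!("demo"));
```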
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index c23a12639924..00f9b558a3ad 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -10,6 +10,7 @@ use crate::types::Opaque;
use pin_init;
mod arc;
+pub mod aref;
pub mod completion;
mod condvar;
pub mod lock;
@@ -41,7 +42,7 @@ impl LockClassKey {
/// Initializes a dynamically allocated lock class key. In the common case of using a
/// statically allocated lock class key, the static_lock_class! macro should be used instead.
///
- /// # Example
+ /// # Examples
/// ```
/// # use kernel::c_str;
/// # use kernel::alloc::KBox;
@@ -95,8 +96,11 @@ impl PinnedDrop for LockClassKey {
macro_rules! static_lock_class {
() => {{
static CLASS: $crate::sync::LockClassKey =
- // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
- // lock_class_key
+ // Lockdep expects uninitialized memory when it's handed a statically allocated `struct
+ // lock_class_key`.
+ //
+ // SAFETY: `LockClassKey` transparently wraps `Opaque` which permits uninitialized
+ // memory.
unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
$crate::prelude::Pin::static_ref(&CLASS)
}};
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index c7af0aa48a0a..63a66761d0c7 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -19,12 +19,14 @@
use crate::{
alloc::{AllocError, Flags, KBox},
bindings,
+ ffi::c_void,
init::InPlaceInit,
try_init,
types::{ForeignOwnable, Opaque},
};
use core::{
alloc::Layout,
+ borrow::{Borrow, BorrowMut},
fmt,
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
@@ -140,10 +142,9 @@ pub struct Arc<T: ?Sized> {
_p: PhantomData<ArcInner<T>>,
}
-#[doc(hidden)]
#[pin_data]
#[repr(C)]
-pub struct ArcInner<T: ?Sized> {
+struct ArcInner<T: ?Sized> {
refcount: Opaque<bindings::refcount_t>,
data: T,
}
@@ -372,20 +373,22 @@ impl<T: ?Sized> Arc<T> {
}
}
-// SAFETY: The `into_foreign` function returns a pointer that is well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well aligned
+// pointer to `ArcInner<T>`.
unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
- type PointedTo = ArcInner<T>;
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<ArcInner<T>>();
+
type Borrowed<'a> = ArcBorrow<'a, T>;
type BorrowedMut<'a> = Self::Borrowed<'a>;
- fn into_foreign(self) -> *mut Self::PointedTo {
- ManuallyDrop::new(self).ptr.as_ptr()
+ fn into_foreign(self) -> *mut c_void {
+ ManuallyDrop::new(self).ptr.as_ptr().cast()
}
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self {
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: By the safety requirement of this function, we know that `ptr` came from
// a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
@@ -393,20 +396,20 @@ unsafe impl<T: 'static> ForeignOwnable for Arc<T> {
unsafe { Self::from_inner(inner) }
}
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
// call to `Self::into_foreign`.
- let inner = unsafe { NonNull::new_unchecked(ptr) };
+ let inner = unsafe { NonNull::new_unchecked(ptr.cast::<ArcInner<T>>()) };
// SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
// for the lifetime of the returned value.
unsafe { ArcBorrow::new(inner) }
}
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> ArcBorrow<'a, T> {
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> ArcBorrow<'a, T> {
// SAFETY: The safety requirements for `borrow_mut` are a superset of the safety
// requirements for `borrow`.
- unsafe { Self::borrow(ptr) }
+ unsafe { <Self as ForeignOwnable>::borrow(ptr) }
}
}
@@ -426,6 +429,31 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::Arc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Shared instance.
+/// let arc = Arc::new(1, GFP_KERNEL)?;
+/// let shared = Foo(arc.clone());
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for Arc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
impl<T: ?Sized> Clone for Arc<T> {
fn clone(&self) -> Self {
// SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
@@ -834,6 +862,56 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> {
}
}
+/// # Examples
+///
+/// ```
+/// # use core::borrow::Borrow;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: Borrow<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let shared = Foo(arc);
+///
+/// let i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> Borrow<T> for UniqueArc<T> {
+ fn borrow(&self) -> &T {
+ self.deref()
+ }
+}
+
+/// # Examples
+///
+/// ```
+/// # use core::borrow::BorrowMut;
+/// # use kernel::sync::UniqueArc;
+/// struct Foo<B: BorrowMut<u32>>(B);
+///
+/// // Owned instance.
+/// let owned = Foo(1);
+///
+/// // Owned instance using `UniqueArc`.
+/// let arc = UniqueArc::new(1, GFP_KERNEL)?;
+/// let shared = Foo(arc);
+///
+/// let mut i = 1;
+/// // Borrowed from `i`.
+/// let borrowed = Foo(&mut i);
+/// # Ok::<(), Error>(())
+/// ```
+impl<T: ?Sized> BorrowMut<T> for UniqueArc<T> {
+ fn borrow_mut(&mut self) -> &mut T {
+ self.deref_mut()
+ }
+}
+
impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.deref(), f)
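With `PointedTo` gone, foreign pointers are plain `*mut c_void` regardless of `T`. A hedged round-trip sketch in kernel-crate context, with an arbitrary payload type:

```
use kernel::ffi::c_void;
use kernel::prelude::*;
use kernel::sync::Arc;
use kernel::types::ForeignOwnable;

fn roundtrip() -> Result {
    let arc = Arc::new(42i32, GFP_KERNEL)?;
    // Hand ownership to the C side as an opaque, well-aligned pointer.
    let raw: *mut c_void = arc.into_foreign();
    // ... later, reclaim ownership exactly once ...
    // SAFETY: `raw` came from `into_foreign` above and is not used again.
    let arc = unsafe { <Arc<i32> as ForeignOwnable>::from_foreign(raw) };
    assert_eq!(*arc, 42);
    Ok(())
}
```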
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
new file mode 100644
index 000000000000..dbd77bb68617
--- /dev/null
+++ b/rust/kernel/sync/aref.rs
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Internal reference counting support.
+
+use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref, ptr::NonNull};
+
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own custom ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef<T>`].
+///
+/// This is usually implemented by wrappers around existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
+/// instances of a type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in memory
+/// at least until matching decrements are performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
+pub unsafe trait AlwaysRefCounted {
+ /// Increments the reference count on the object.
+ fn inc_ref(&self);
+
+ /// Decrements the reference count on the object.
+ ///
+ /// Frees the object when the count reaches zero.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there was a previous matching increment to the reference count,
+ /// and that the object is no longer used after its reference count is decremented (as it may
+ /// result in the object being freed), unless the caller owns another increment on the refcount
+ /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+ /// [`AlwaysRefCounted::dec_ref`] once).
+ unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped. It is also automatically incremented when a new instance is created via
+/// [`ARef::clone`].
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
+/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
+pub struct ARef<T: AlwaysRefCounted> {
+ ptr: NonNull<T>,
+ _p: PhantomData<T>,
+}
+
+// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
+// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
+
+// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
+// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
+// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+
+impl<T: AlwaysRefCounted> ARef<T> {
+ /// Creates a new instance of [`ARef`].
+ ///
+ /// It takes over an increment of the reference count on the underlying object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the reference count was incremented at least once, and that they
+ /// are properly relinquishing one increment. That is, if there is only one increment, callers
+ /// must not use the underlying object anymore -- it is only safe to do so via the newly
+ /// created [`ARef`].
+ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+ // INVARIANT: The safety requirements guarantee that the new instance now owns the
+ // increment on the refcount.
+ Self {
+ ptr,
+ _p: PhantomData,
+ }
+ }
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// # // SAFETY: TODO.
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
+ /// # // SAFETY: TODO.
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
+}
+
+impl<T: AlwaysRefCounted> Clone for ARef<T> {
+ fn clone(&self) -> Self {
+ self.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(self.ptr) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Deref for ARef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the object is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
+ fn from(b: &T) -> Self {
+ b.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(NonNull::from(b)) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Drop for ARef<T> {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
+ // decrement.
+ unsafe { T::dec_ref(self.ptr) };
+ }
+}
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index a8089a98da9e..64c8dcf548d6 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -24,6 +24,9 @@
//! C header: [`include/linux/jiffies.h`](srctree/include/linux/jiffies.h).
//! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
+use core::marker::PhantomData;
+
+pub mod delay;
pub mod hrtimer;
/// The number of nanoseconds per microsecond.
@@ -49,26 +52,141 @@ pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
unsafe { bindings::__msecs_to_jiffies(msecs) }
}
+/// Trait for clock sources.
+///
+/// Selection of the clock source depends on the use case. In some cases the usage of a
+/// particular clock is mandatory, e.g. in network protocols or filesystems. In other
+/// cases the user of the clock has to decide which clock is best suited for the
+/// purpose. In most scenarios the [`Monotonic`] clock is the best choice as it
+/// provides an accurate monotonic notion of time (leap second smearing ignored).
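+///
+/// # Examples
+///
+/// A minimal usage sketch with the [`Monotonic`] clock defined below (the
+/// work being timed is elided):
+///
+/// ```
+/// use kernel::time::{Instant, Monotonic};
+///
+/// // Take a reading of the monotonic clock, then measure the elapsed time.
+/// let start = Instant::<Monotonic>::now();
+/// // ... do some work ...
+/// let delta = start.elapsed();
+/// assert!(delta.as_nanos() >= 0);
+/// ```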
+pub trait ClockSource {
+ /// The kernel clock ID associated with this clock source.
+ ///
+ /// This constant corresponds to the C side `clockid_t` value.
+ const ID: bindings::clockid_t;
+
+ /// Get the current time from the clock source.
+ ///
+ /// The function must return a value in the range from 0 to `KTIME_MAX`.
+ fn ktime_get() -> bindings::ktime_t;
+}
+
+/// A monotonically increasing clock.
+///
+/// A nonsettable system-wide clock that represents monotonic time since, as
+/// described by POSIX, "some unspecified point in the past". On Linux, that
+/// point corresponds to the number of seconds that the system has been
+/// running since it was booted.
+///
+/// The CLOCK_MONOTONIC clock is not affected by discontinuous jumps in
+/// CLOCK_REALTIME (e.g., if the system administrator manually changes the
+/// clock), but is affected by frequency adjustments. This clock does not
+/// count time that the system is suspended.
+pub struct Monotonic;
+
+impl ClockSource for Monotonic {
+ const ID: bindings::clockid_t = bindings::CLOCK_MONOTONIC as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get()` outside of NMI context.
+ unsafe { bindings::ktime_get() }
+ }
+}
+
+/// A settable system-wide clock that measures real (i.e., wall-clock) time.
+///
+/// Setting this clock requires appropriate privileges. This clock is
+/// affected by discontinuous jumps in the system time (e.g., if the system
+/// administrator manually changes the clock), and by frequency adjustments
+/// performed by NTP and similar applications via adjtime(3), adjtimex(2),
+/// clock_adjtime(2), and ntp_adjtime(3). This clock normally counts the
+/// number of seconds since 1970-01-01 00:00:00 Coordinated Universal Time
+/// (UTC) except that it ignores leap seconds; near a leap second it may be
+/// adjusted by leap second smearing to stay roughly in sync with UTC. Leap
+/// second smearing applies frequency adjustments to the clock to speed up
+/// or slow down the clock to account for the leap second without
+/// discontinuities in the clock. If leap second smearing is not applied,
+/// the clock will experience discontinuity around leap second adjustment.
+pub struct RealTime;
+
+impl ClockSource for RealTime {
+ const ID: bindings::clockid_t = bindings::CLOCK_REALTIME as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get_real()` outside of NMI context.
+ unsafe { bindings::ktime_get_real() }
+ }
+}
+
+/// A monotonic clock that ticks while the system is suspended.
+///
+/// A nonsettable system-wide clock that is identical to CLOCK_MONOTONIC,
+/// except that it also includes any time that the system is suspended. This
+/// allows applications to get a suspend-aware monotonic clock without
+/// having to deal with the complications of CLOCK_REALTIME, which may have
+/// discontinuities if the time is changed using settimeofday(2) or similar.
+pub struct BootTime;
+
+impl ClockSource for BootTime {
+ const ID: bindings::clockid_t = bindings::CLOCK_BOOTTIME as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+ // SAFETY: It is always safe to call `ktime_get_boottime()` outside of NMI context.
+ unsafe { bindings::ktime_get_boottime() }
+ }
+}
+
+/// International Atomic Time.
+///
+/// A system-wide clock derived from wall-clock time but counting leap seconds.
+///
+/// This clock is coupled to CLOCK_REALTIME and will be set when CLOCK_REALTIME is
+/// set, or when the offset to CLOCK_REALTIME is changed via adjtimex(2). This
+/// usually happens during boot and **should** not happen during normal operations.
+/// However, if NTP or another application adjusts CLOCK_REALTIME by leap second
+/// smearing, this clock will not be precise during leap second smearing.
+///
+/// The acronym TAI refers to International Atomic Time.
+pub struct Tai;
+
+impl ClockSource for Tai {
+ const ID: bindings::clockid_t = bindings::CLOCK_TAI as bindings::clockid_t;
+
+ fn ktime_get() -> bindings::ktime_t {
+        // SAFETY: It is always safe to call `ktime_get_clocktai()` outside of NMI context.
+ unsafe { bindings::ktime_get_clocktai() }
+ }
+}
+
/// A specific point in time.
///
/// # Invariants
///
/// The `inner` value is in the range from 0 to `KTIME_MAX`.
#[repr(transparent)]
-#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct Instant {
+#[derive(PartialEq, PartialOrd, Eq, Ord)]
+pub struct Instant<C: ClockSource> {
inner: bindings::ktime_t,
+ _c: PhantomData<C>,
+}
+
+impl<C: ClockSource> Clone for Instant<C> {
+ fn clone(&self) -> Self {
+ *self
+ }
}
-impl Instant {
- /// Get the current time using `CLOCK_MONOTONIC`.
+impl<C: ClockSource> Copy for Instant<C> {}
+
+impl<C: ClockSource> Instant<C> {
+ /// Get the current time from the clock source.
#[inline]
pub fn now() -> Self {
- // INVARIANT: The `ktime_get()` function returns a value in the range
+ // INVARIANT: The `ClockSource::ktime_get()` function returns a value in the range
// from 0 to `KTIME_MAX`.
Self {
- // SAFETY: It is always safe to call `ktime_get()` outside of NMI context.
- inner: unsafe { bindings::ktime_get() },
+ inner: C::ktime_get(),
+ _c: PhantomData,
}
}
@@ -77,86 +195,25 @@ impl Instant {
pub fn elapsed(&self) -> Delta {
Self::now() - *self
}
+
+    /// Returns the time as the raw `ktime_t` value in nanoseconds.
+    #[inline]
+ pub(crate) fn as_nanos(&self) -> i64 {
+ self.inner
+ }
}
-impl core::ops::Sub for Instant {
+impl<C: ClockSource> core::ops::Sub for Instant<C> {
type Output = Delta;
// By the type invariant, it never overflows.
#[inline]
- fn sub(self, other: Instant) -> Delta {
+ fn sub(self, other: Instant<C>) -> Delta {
Delta {
nanos: self.inner - other.inner,
}
}
}
-/// An identifier for a clock. Used when specifying clock sources.
-///
-///
-/// Selection of the clock depends on the use case. In some cases the usage of a
-/// particular clock is mandatory, e.g. in network protocols, filesystems.In other
-/// cases the user of the clock has to decide which clock is best suited for the
-/// purpose. In most scenarios clock [`ClockId::Monotonic`] is the best choice as it
-/// provides a accurate monotonic notion of time (leap second smearing ignored).
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-#[repr(u32)]
-pub enum ClockId {
- /// A settable system-wide clock that measures real (i.e., wall-clock) time.
- ///
- /// Setting this clock requires appropriate privileges. This clock is
- /// affected by discontinuous jumps in the system time (e.g., if the system
- /// administrator manually changes the clock), and by frequency adjustments
- /// performed by NTP and similar applications via adjtime(3), adjtimex(2),
- /// clock_adjtime(2), and ntp_adjtime(3). This clock normally counts the
- /// number of seconds since 1970-01-01 00:00:00 Coordinated Universal Time
- /// (UTC) except that it ignores leap seconds; near a leap second it may be
- /// adjusted by leap second smearing to stay roughly in sync with UTC. Leap
- /// second smearing applies frequency adjustments to the clock to speed up
- /// or slow down the clock to account for the leap second without
- /// discontinuities in the clock. If leap second smearing is not applied,
- /// the clock will experience discontinuity around leap second adjustment.
- RealTime = bindings::CLOCK_REALTIME,
- /// A monotonically increasing clock.
- ///
- /// A nonsettable system-wide clock that represents monotonic time since—as
- /// described by POSIX—"some unspecified point in the past". On Linux, that
- /// point corresponds to the number of seconds that the system has been
- /// running since it was booted.
- ///
- /// The CLOCK_MONOTONIC clock is not affected by discontinuous jumps in the
- /// CLOCK_REAL (e.g., if the system administrator manually changes the
- /// clock), but is affected by frequency adjustments. This clock does not
- /// count time that the system is suspended.
- Monotonic = bindings::CLOCK_MONOTONIC,
- /// A monotonic that ticks while system is suspended.
- ///
- /// A nonsettable system-wide clock that is identical to CLOCK_MONOTONIC,
- /// except that it also includes any time that the system is suspended. This
- /// allows applications to get a suspend-aware monotonic clock without
- /// having to deal with the complications of CLOCK_REALTIME, which may have
- /// discontinuities if the time is changed using settimeofday(2) or similar.
- BootTime = bindings::CLOCK_BOOTTIME,
- /// International Atomic Time.
- ///
- /// A system-wide clock derived from wall-clock time but counting leap seconds.
- ///
- /// This clock is coupled to CLOCK_REALTIME and will be set when CLOCK_REALTIME is
- /// set, or when the offset to CLOCK_REALTIME is changed via adjtimex(2). This
- /// usually happens during boot and **should** not happen during normal operations.
- /// However, if NTP or another application adjusts CLOCK_REALTIME by leap second
- /// smearing, this clock will not be precise during leap second smearing.
- ///
- /// The acronym TAI refers to International Atomic Time.
- TAI = bindings::CLOCK_TAI,
-}
-
-impl ClockId {
- fn into_c(self) -> bindings::clockid_t {
- self as bindings::clockid_t
- }
-}
-
/// A span of time.
///
/// This struct represents a span of time, with its value stored as nanoseconds.
@@ -228,13 +285,31 @@ impl Delta {
/// Return the smallest number of microseconds greater than or equal
/// to the value in the [`Delta`].
#[inline]
- pub const fn as_micros_ceil(self) -> i64 {
- self.as_nanos().saturating_add(NSEC_PER_USEC - 1) / NSEC_PER_USEC
+ pub fn as_micros_ceil(self) -> i64 {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.as_nanos().saturating_add(NSEC_PER_USEC - 1) / NSEC_PER_USEC
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ // SAFETY: It is always safe to call `ktime_to_us()` with any value.
+ unsafe {
+ bindings::ktime_to_us(self.as_nanos().saturating_add(NSEC_PER_USEC - 1))
+ }
}
/// Return the number of milliseconds in the [`Delta`].
#[inline]
- pub const fn as_millis(self) -> i64 {
- self.as_nanos() / NSEC_PER_MSEC
+ pub fn as_millis(self) -> i64 {
+ #[cfg(CONFIG_64BIT)]
+ {
+ self.as_nanos() / NSEC_PER_MSEC
+ }
+
+ #[cfg(not(CONFIG_64BIT))]
+ // SAFETY: It is always safe to call `ktime_to_ms()` with any value.
+ unsafe {
+ bindings::ktime_to_ms(self.as_nanos())
+ }
}
}
diff --git a/rust/kernel/time/delay.rs b/rust/kernel/time/delay.rs
new file mode 100644
index 000000000000..eb8838da62bc
--- /dev/null
+++ b/rust/kernel/time/delay.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Delay and sleep primitives.
+//!
+//! This module contains the kernel APIs related to delay and sleep that
+//! have been ported or wrapped for usage by Rust code in the kernel.
+//!
+//! C header: [`include/linux/delay.h`](srctree/include/linux/delay.h).
+
+use super::Delta;
+use crate::prelude::*;
+
+/// Sleeps for at least the given duration.
+///
+/// Equivalent to the C side [`fsleep()`], a flexible sleep function
+/// which automatically chooses the best sleep method based on the duration.
+///
+/// `delta` must be within `[0, i32::MAX]` microseconds; calling this function
+/// with an out-of-range value is considered a bug. In that case, the function
+/// sleeps for at least the maximum value in the range and may warn in the
+/// future.
+///
+/// The behavior above differs from the C side [`fsleep()`] for which out-of-range
+/// values mean "infinite timeout" instead.
+///
+/// This function can only be used in a nonatomic context.
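+///
+/// # Examples
+///
+/// A minimal usage sketch (assumes a nonatomic context):
+///
+/// ```
+/// use kernel::time::{delay::fsleep, Delta};
+///
+/// // Sleep for at least ten milliseconds.
+/// fsleep(Delta::from_millis(10));
+/// ```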
+///
+/// [`fsleep()`]: https://docs.kernel.org/timers/delay_sleep_functions.html#c.fsleep
+pub fn fsleep(delta: Delta) {
+ // The maximum value is set to `i32::MAX` microseconds to prevent integer
+ // overflow inside fsleep, which could lead to unintentional infinite sleep.
+ const MAX_DELTA: Delta = Delta::from_micros(i32::MAX as i64);
+
+ let delta = if (Delta::ZERO..=MAX_DELTA).contains(&delta) {
+ delta
+ } else {
+ // TODO: Add WARN_ONCE() when it's supported.
+ MAX_DELTA
+ };
+
+ // SAFETY: It is always safe to call `fsleep()` with any duration.
+ unsafe {
+ // Convert the duration to microseconds and round up to preserve
+        // the guarantee: `fsleep()` sleeps for at least the provided duration,
+        // but it may sleep for longer under some circumstances.
+ bindings::fsleep(delta.as_micros_ceil() as c_ulong)
+ }
+}
diff --git a/rust/kernel/time/hrtimer.rs b/rust/kernel/time/hrtimer.rs
index 36e1290cd079..144e3b57cc78 100644
--- a/rust/kernel/time/hrtimer.rs
+++ b/rust/kernel/time/hrtimer.rs
@@ -67,27 +67,11 @@
//! A `restart` operation on a timer in the **stopped** state is equivalent to a
//! `start` operation.
-use super::ClockId;
+use super::{ClockSource, Delta, Instant};
use crate::{prelude::*, types::Opaque};
use core::marker::PhantomData;
use pin_init::PinInit;
-/// A Rust wrapper around a `ktime_t`.
-// NOTE: Ktime is going to be removed when hrtimer is converted to Instant/Delta.
-#[repr(transparent)]
-#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct Ktime {
- inner: bindings::ktime_t,
-}
-
-impl Ktime {
- /// Returns the number of nanoseconds.
- #[inline]
- pub fn to_ns(self) -> i64 {
- self.inner
- }
-}
-
/// A timer backed by a C `struct hrtimer`.
///
/// # Invariants
@@ -98,7 +82,6 @@ impl Ktime {
pub struct HrTimer<T> {
#[pin]
timer: Opaque<bindings::hrtimer>,
- mode: HrTimerMode,
_t: PhantomData<T>,
}
@@ -112,9 +95,10 @@ unsafe impl<T> Sync for HrTimer<T> {}
impl<T> HrTimer<T> {
/// Return an initializer for a new timer instance.
- pub fn new(mode: HrTimerMode, clock: ClockId) -> impl PinInit<Self>
+ pub fn new() -> impl PinInit<Self>
where
T: HrTimerCallback,
+ T: HasHrTimer<T>,
{
pin_init!(Self {
// INVARIANT: We initialize `timer` with `hrtimer_setup` below.
@@ -126,12 +110,11 @@ impl<T> HrTimer<T> {
bindings::hrtimer_setup(
place,
Some(T::Pointer::run),
- clock.into_c(),
- mode.into_c(),
+ <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Clock::ID,
+ <T as HasHrTimer<T>>::TimerMode::C_MODE,
);
}
}),
- mode: mode,
_t: PhantomData,
})
}
@@ -148,7 +131,7 @@ impl<T> HrTimer<T> {
// SAFETY: The field projection to `timer` does not go out of bounds,
// because the caller of this function promises that `this` points to an
// allocation of at least the size of `Self`.
- unsafe { Opaque::raw_get(core::ptr::addr_of!((*this).timer)) }
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*this).timer)) }
}
/// Cancel an initialized and potentially running timer.
@@ -193,6 +176,11 @@ impl<T> HrTimer<T> {
/// exist. A timer can be manipulated through any of the handles, and a handle
/// may represent a cancelled timer.
pub trait HrTimerPointer: Sync + Sized {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// A handle representing a started or restarted timer.
///
/// If the timer is running or if the timer callback is executing when the
@@ -205,7 +193,7 @@ pub trait HrTimerPointer: Sync + Sized {
/// Start the timer with expiry after `expires` time units. If the timer was
/// already running, it is restarted with the new expiry time.
- fn start(self, expires: Ktime) -> Self::TimerHandle;
+ fn start(self, expires: <Self::TimerMode as HrTimerMode>::Expires) -> Self::TimerHandle;
}
/// Unsafe version of [`HrTimerPointer`] for situations where leaking the
@@ -220,6 +208,11 @@ pub trait HrTimerPointer: Sync + Sized {
/// [`UnsafeHrTimerPointer`] outlives any associated [`HrTimerPointer::TimerHandle`]
/// instances.
pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// A handle representing a running timer.
///
/// # Safety
@@ -236,7 +229,7 @@ pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
///
/// Caller promises keep the timer structure alive until the timer is dead.
/// Caller can ensure this by not leaking the returned [`Self::TimerHandle`].
- unsafe fn start(self, expires: Ktime) -> Self::TimerHandle;
+ unsafe fn start(self, expires: <Self::TimerMode as HrTimerMode>::Expires) -> Self::TimerHandle;
}
/// A trait for stack allocated timers.
@@ -246,9 +239,14 @@ pub unsafe trait UnsafeHrTimerPointer: Sync + Sized {
/// Implementers must ensure that `start_scoped` does not return until the
/// timer is dead and the timer handler is not running.
pub unsafe trait ScopedHrTimerPointer {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// Start the timer to run after `expires` time units and immediately
/// after call `f`. When `f` returns, the timer is cancelled.
- fn start_scoped<T, F>(self, expires: Ktime, f: F) -> T
+ fn start_scoped<T, F>(self, expires: <Self::TimerMode as HrTimerMode>::Expires, f: F) -> T
where
F: FnOnce() -> T;
}
@@ -260,7 +258,13 @@ unsafe impl<T> ScopedHrTimerPointer for T
where
T: UnsafeHrTimerPointer,
{
- fn start_scoped<U, F>(self, expires: Ktime, f: F) -> U
+ type TimerMode = T::TimerMode;
+
+ fn start_scoped<U, F>(
+ self,
+ expires: <<T as UnsafeHrTimerPointer>::TimerMode as HrTimerMode>::Expires,
+ f: F,
+ ) -> U
where
F: FnOnce() -> U,
{
@@ -335,6 +339,11 @@ pub unsafe trait HrTimerHandle {
/// their documentation. All the methods of this trait must operate on the same
/// field.
pub unsafe trait HasHrTimer<T> {
+ /// The operational mode associated with this timer.
+ ///
+ /// This defines how the expiration value is interpreted.
+ type TimerMode: HrTimerMode;
+
/// Return a pointer to the [`HrTimer`] within `Self`.
///
/// This function is useful to get access to the value without creating
@@ -382,14 +391,14 @@ pub unsafe trait HasHrTimer<T> {
/// - `this` must point to a valid `Self`.
/// - Caller must ensure that the pointee of `this` lives until the timer
/// fires or is canceled.
- unsafe fn start(this: *const Self, expires: Ktime) {
+ unsafe fn start(this: *const Self, expires: <Self::TimerMode as HrTimerMode>::Expires) {
// SAFETY: By function safety requirement, `this` is a valid `Self`.
unsafe {
bindings::hrtimer_start_range_ns(
Self::c_timer_ptr(this).cast_mut(),
- expires.to_ns(),
+ expires.as_nanos(),
0,
- (*Self::raw_get_timer(this)).mode.into_c(),
+ <Self::TimerMode as HrTimerMode>::C_MODE,
);
}
}
@@ -411,80 +420,171 @@ impl HrTimerRestart {
}
}
-/// Operational mode of [`HrTimer`].
-// NOTE: Some of these have the same encoding on the C side, so we keep
-// `repr(Rust)` and convert elsewhere.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub enum HrTimerMode {
- /// Timer expires at the given expiration time.
- Absolute,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- Relative,
- /// Timer does not move between CPU cores.
- Pinned,
- /// Timer handler is executed in soft irq context.
- Soft,
- /// Timer handler is executed in hard irq context.
- Hard,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- AbsolutePinned,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- RelativePinned,
- /// Timer expires at the given expiration time.
- /// Timer handler is executed in soft irq context.
- AbsoluteSoft,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer handler is executed in soft irq context.
- RelativeSoft,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in soft irq context.
- AbsolutePinnedSoft,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in soft irq context.
- RelativePinnedSoft,
- /// Timer expires at the given expiration time.
- /// Timer handler is executed in hard irq context.
- AbsoluteHard,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer handler is executed in hard irq context.
- RelativeHard,
- /// Timer expires at the given expiration time.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in hard irq context.
- AbsolutePinnedHard,
- /// Timer expires after the given expiration time interpreted as a duration from now.
- /// Timer does not move between CPU cores.
- /// Timer handler is executed in hard irq context.
- RelativePinnedHard,
+/// Time representations that can be used as expiration values in [`HrTimer`].
+pub trait HrTimerExpires {
+ /// Converts the expiration time into a nanosecond representation.
+ ///
+    /// This value corresponds to a raw `ktime_t` value, suitable for passing to kernel
+    /// timer functions. The interpretation (absolute vs. relative) depends on the
+    /// associated [`HrTimerMode`] in use.
+ fn as_nanos(&self) -> i64;
}
-impl HrTimerMode {
- fn into_c(self) -> bindings::hrtimer_mode {
- use bindings::*;
- match self {
- HrTimerMode::Absolute => hrtimer_mode_HRTIMER_MODE_ABS,
- HrTimerMode::Relative => hrtimer_mode_HRTIMER_MODE_REL,
- HrTimerMode::Pinned => hrtimer_mode_HRTIMER_MODE_PINNED,
- HrTimerMode::Soft => hrtimer_mode_HRTIMER_MODE_SOFT,
- HrTimerMode::Hard => hrtimer_mode_HRTIMER_MODE_HARD,
- HrTimerMode::AbsolutePinned => hrtimer_mode_HRTIMER_MODE_ABS_PINNED,
- HrTimerMode::RelativePinned => hrtimer_mode_HRTIMER_MODE_REL_PINNED,
- HrTimerMode::AbsoluteSoft => hrtimer_mode_HRTIMER_MODE_ABS_SOFT,
- HrTimerMode::RelativeSoft => hrtimer_mode_HRTIMER_MODE_REL_SOFT,
- HrTimerMode::AbsolutePinnedSoft => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_SOFT,
- HrTimerMode::RelativePinnedSoft => hrtimer_mode_HRTIMER_MODE_REL_PINNED_SOFT,
- HrTimerMode::AbsoluteHard => hrtimer_mode_HRTIMER_MODE_ABS_HARD,
- HrTimerMode::RelativeHard => hrtimer_mode_HRTIMER_MODE_REL_HARD,
- HrTimerMode::AbsolutePinnedHard => hrtimer_mode_HRTIMER_MODE_ABS_PINNED_HARD,
- HrTimerMode::RelativePinnedHard => hrtimer_mode_HRTIMER_MODE_REL_PINNED_HARD,
- }
+impl<C: ClockSource> HrTimerExpires for Instant<C> {
+ #[inline]
+ fn as_nanos(&self) -> i64 {
+ Instant::<C>::as_nanos(self)
+ }
+}
+
+impl HrTimerExpires for Delta {
+ #[inline]
+ fn as_nanos(&self) -> i64 {
+ Delta::as_nanos(*self)
}
}
+mod private {
+ use crate::time::ClockSource;
+
+ pub trait Sealed {}
+
+ impl<C: ClockSource> Sealed for super::AbsoluteMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsoluteSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedSoftMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsoluteHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativeHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::AbsolutePinnedHardMode<C> {}
+ impl<C: ClockSource> Sealed for super::RelativePinnedHardMode<C> {}
+}
+
+/// Operational mode of [`HrTimer`].
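+///
+/// # Examples
+///
+/// A minimal sketch of how a mode ties a clock source to an expiry type; the
+/// `expiry` function below is purely illustrative and only forwards its
+/// argument:
+///
+/// ```
+/// use kernel::time::hrtimer::{HrTimerMode, RelativeMode};
+/// use kernel::time::{Delta, Monotonic};
+///
+/// // For a relative timer on the monotonic clock, the expiry value is a `Delta`.
+/// fn expiry(d: Delta) -> <RelativeMode<Monotonic> as HrTimerMode>::Expires {
+///     d
+/// }
+/// # let _ = expiry(Delta::ZERO);
+/// ```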
+pub trait HrTimerMode: private::Sealed {
+ /// The C representation of hrtimer mode.
+ const C_MODE: bindings::hrtimer_mode;
+
+ /// Type representing the clock source.
+ type Clock: ClockSource;
+
+ /// Type representing the expiration specification (absolute or relative time).
+ type Expires: HrTimerExpires;
+}
+
+/// Timer that expires at a fixed point in time.
+pub struct AbsoluteMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for AbsoluteMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer that expires after a delay from now.
+pub struct RelativeMode<C: ClockSource>(PhantomData<C>);
+
+impl<C: ClockSource> HrTimerMode for RelativeMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration time, pinned to its current CPU.
+pub struct AbsolutePinnedMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration time, pinned to its current CPU.
+pub struct RelativePinnedMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for RelativePinnedMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, handled in soft irq context.
+pub struct AbsoluteSoftMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for AbsoluteSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_SOFT;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, handled in soft irq context.
+pub struct RelativeSoftMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for RelativeSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_SOFT;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, pinned to CPU and handled in soft irq context.
+pub struct AbsolutePinnedSoftMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED_SOFT;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, pinned to CPU and handled in soft irq context.
+pub struct RelativePinnedSoftMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for RelativePinnedSoftMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED_SOFT;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, handled in hard irq context.
+pub struct AbsoluteHardMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for AbsoluteHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_HARD;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, handled in hard irq context.
+pub struct RelativeHardMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for RelativeHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_HARD;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
+/// Timer with absolute expiration, pinned to CPU and handled in hard irq context.
+pub struct AbsolutePinnedHardMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for AbsolutePinnedHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_ABS_PINNED_HARD;
+
+ type Clock = C;
+ type Expires = Instant<C>;
+}
+
+/// Timer with relative expiration, pinned to CPU and handled in hard irq context.
+pub struct RelativePinnedHardMode<C: ClockSource>(PhantomData<C>);
+impl<C: ClockSource> HrTimerMode for RelativePinnedHardMode<C> {
+ const C_MODE: bindings::hrtimer_mode = bindings::hrtimer_mode_HRTIMER_MODE_REL_PINNED_HARD;
+
+ type Clock = C;
+ type Expires = Delta;
+}
+
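+// A sketch of selecting a mode when implementing `HasHrTimer` via the
+// `impl_has_hr_timer!` macro below (`MyTimer` and its `timer` field are
+// hypothetical):
+//
+//     impl_has_hr_timer! {
+//         impl HasHrTimer<Self> for MyTimer {
+//             mode: RelativeMode<Monotonic>,
+//             field: self.timer,
+//         }
+//     }
+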
/// Use to implement the [`HasHrTimer<T>`] trait.
///
/// See [`module`] documentation for an example.
@@ -496,12 +596,16 @@ macro_rules! impl_has_hr_timer {
impl$({$($generics:tt)*})?
HasHrTimer<$timer_type:ty>
for $self:ty
- { self.$field:ident }
+ {
+ mode : $mode:ty,
+ field : self.$field:ident $(,)?
+ }
$($rest:tt)*
) => {
// SAFETY: This implementation of `raw_get_timer` only compiles if the
// field has the right type.
unsafe impl$(<$($generics)*>)? $crate::time::hrtimer::HasHrTimer<$timer_type> for $self {
+ type TimerMode = $mode;
#[inline]
unsafe fn raw_get_timer(
diff --git a/rust/kernel/time/hrtimer/arc.rs b/rust/kernel/time/hrtimer/arc.rs
index ccf1e66e5b2d..ed490a7a8950 100644
--- a/rust/kernel/time/hrtimer/arc.rs
+++ b/rust/kernel/time/hrtimer/arc.rs
@@ -4,8 +4,8 @@ use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
use super::HrTimerHandle;
+use super::HrTimerMode;
use super::HrTimerPointer;
-use super::Ktime;
use super::RawHrTimerCallback;
use crate::sync::Arc;
use crate::sync::ArcBorrow;
@@ -54,9 +54,13 @@ where
T: HasHrTimer<T>,
T: for<'a> HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = ArcHrTimerHandle<T>;
- fn start(self, expires: Ktime) -> ArcHrTimerHandle<T> {
+ fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> ArcHrTimerHandle<T> {
// SAFETY:
// - We keep `self` alive by wrapping it in a handle below.
// - Since we generate the pointer passed to `start` from a valid
diff --git a/rust/kernel/time/hrtimer/pin.rs b/rust/kernel/time/hrtimer/pin.rs
index 293ca9cf058c..aef16d9ee2f0 100644
--- a/rust/kernel/time/hrtimer/pin.rs
+++ b/rust/kernel/time/hrtimer/pin.rs
@@ -4,7 +4,7 @@ use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
use super::HrTimerHandle;
-use super::Ktime;
+use super::HrTimerMode;
use super::RawHrTimerCallback;
use super::UnsafeHrTimerPointer;
use core::pin::Pin;
@@ -54,9 +54,13 @@ where
T: HasHrTimer<T>,
T: HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = PinHrTimerHandle<'a, T>;
- unsafe fn start(self, expires: Ktime) -> Self::TimerHandle {
+ unsafe fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// Cast to pointer
let self_ptr: *const T = self.get_ref();
@@ -79,7 +83,7 @@ where
unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
// `HrTimer` is `repr(C)`
- let timer_ptr = ptr as *mut HrTimer<T>;
+ let timer_ptr = ptr.cast::<HrTimer<T>>();
// SAFETY: By the safety requirement of this function, `timer_ptr`
// points to a `HrTimer<T>` contained in an `T`.
diff --git a/rust/kernel/time/hrtimer/pin_mut.rs b/rust/kernel/time/hrtimer/pin_mut.rs
index 6033572d35ad..767d0a4e8a2c 100644
--- a/rust/kernel/time/hrtimer/pin_mut.rs
+++ b/rust/kernel/time/hrtimer/pin_mut.rs
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
use super::{
- HasHrTimer, HrTimer, HrTimerCallback, HrTimerHandle, Ktime, RawHrTimerCallback,
+ HasHrTimer, HrTimer, HrTimerCallback, HrTimerHandle, HrTimerMode, RawHrTimerCallback,
UnsafeHrTimerPointer,
};
use core::{marker::PhantomData, pin::Pin, ptr::NonNull};
@@ -52,9 +52,13 @@ where
T: HasHrTimer<T>,
T: HrTimerCallback<Pointer<'a> = Self>,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = PinMutHrTimerHandle<'a, T>;
- unsafe fn start(mut self, expires: Ktime) -> Self::TimerHandle {
+ unsafe fn start(
+ mut self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// SAFETY:
// - We promise not to move out of `self`. We only pass `self`
// back to the caller as a `Pin<&mut self>`.
@@ -83,7 +87,7 @@ where
unsafe extern "C" fn run(ptr: *mut bindings::hrtimer) -> bindings::hrtimer_restart {
// `HrTimer` is `repr(C)`
- let timer_ptr = ptr as *mut HrTimer<T>;
+ let timer_ptr = ptr.cast::<HrTimer<T>>();
// SAFETY: By the safety requirement of this function, `timer_ptr`
// points to a `HrTimer<T>` contained in an `T`.
diff --git a/rust/kernel/time/hrtimer/tbox.rs b/rust/kernel/time/hrtimer/tbox.rs
index 29526a5da203..ec08303315f2 100644
--- a/rust/kernel/time/hrtimer/tbox.rs
+++ b/rust/kernel/time/hrtimer/tbox.rs
@@ -4,8 +4,8 @@ use super::HasHrTimer;
use super::HrTimer;
use super::HrTimerCallback;
use super::HrTimerHandle;
+use super::HrTimerMode;
use super::HrTimerPointer;
-use super::Ktime;
use super::RawHrTimerCallback;
use crate::prelude::*;
use core::ptr::NonNull;
@@ -64,9 +64,13 @@ where
T: for<'a> HrTimerCallback<Pointer<'a> = Pin<Box<T, A>>>,
A: crate::alloc::Allocator,
{
+ type TimerMode = <T as HasHrTimer<T>>::TimerMode;
type TimerHandle = BoxHrTimerHandle<T, A>;
- fn start(self, expires: Ktime) -> Self::TimerHandle {
+ fn start(
+ self,
+ expires: <<T as HasHrTimer<T>>::TimerMode as HrTimerMode>::Expires,
+ ) -> Self::TimerHandle {
// SAFETY:
// - We will not move out of this box during timer callback (we pass an
// immutable reference to the callback).
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 3958a5f44d56..dc0a02f5c3cf 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -2,15 +2,17 @@
//! Kernel types.
+use crate::ffi::c_void;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::{ManuallyDrop, MaybeUninit},
+ mem::MaybeUninit,
ops::{Deref, DerefMut},
- ptr::NonNull,
};
use pin_init::{PinInit, Wrapper, Zeroable};
+pub use crate::sync::aref::{ARef, AlwaysRefCounted};
+
/// Used to transfer ownership to and from foreign (non-Rust) languages.
///
/// Ownership is transferred from Rust to a foreign language by calling [`Self::into_foreign`] and
@@ -21,15 +23,10 @@ use pin_init::{PinInit, Wrapper, Zeroable};
///
/// # Safety
///
-/// Implementers must ensure that [`into_foreign`] returns a pointer which meets the alignment
-/// requirements of [`PointedTo`].
-///
-/// [`into_foreign`]: Self::into_foreign
-/// [`PointedTo`]: Self::PointedTo
+/// - Implementations must satisfy the guarantees of [`Self::into_foreign`].
pub unsafe trait ForeignOwnable: Sized {
- /// Type used when the value is foreign-owned. In practical terms only defines the alignment of
- /// the pointer.
- type PointedTo;
+ /// The alignment of pointers returned by `into_foreign`.
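+    ///
+    /// # Examples
+    ///
+    /// A small illustration, using the trivial `()` implementation below:
+    ///
+    /// ```
+    /// use kernel::types::ForeignOwnable;
+    ///
+    /// // `()` is foreign-owned as a dangling but well-aligned pointer.
+    /// assert_eq!(<() as ForeignOwnable>::FOREIGN_ALIGN, core::mem::align_of::<()>());
+    /// ```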
+ const FOREIGN_ALIGN: usize;
/// Type used to immutably borrow a value that is currently foreign-owned.
type Borrowed<'a>;
@@ -39,18 +36,21 @@ pub unsafe trait ForeignOwnable: Sized {
/// Converts a Rust-owned object to a foreign-owned one.
///
+ /// The foreign representation is a pointer to void. Aside from the guarantees listed below,
+    /// there are no other guarantees for this pointer. For example, it might be invalid, dangling,
+    /// or pointing to uninitialized memory. Using it in any way except for [`from_foreign`],
+ /// [`try_from_foreign`], [`borrow`], or [`borrow_mut`] can result in undefined behavior.
+ ///
/// # Guarantees
///
- /// The return value is guaranteed to be well-aligned, but there are no other guarantees for
- /// this pointer. For example, it might be null, dangling, or point to uninitialized memory.
- /// Using it in any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`],
- /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
+    /// - The minimum alignment of the returned pointer is [`Self::FOREIGN_ALIGN`].
+ /// - The returned pointer is not null.
///
/// [`from_foreign`]: Self::from_foreign
/// [`try_from_foreign`]: Self::try_from_foreign
/// [`borrow`]: Self::borrow
/// [`borrow_mut`]: Self::borrow_mut
- fn into_foreign(self) -> *mut Self::PointedTo;
+ fn into_foreign(self) -> *mut c_void;
/// Converts a foreign-owned object back to a Rust-owned one.
///
@@ -60,7 +60,7 @@ pub unsafe trait ForeignOwnable: Sized {
/// must not be passed to `from_foreign` more than once.
///
/// [`into_foreign`]: Self::into_foreign
- unsafe fn from_foreign(ptr: *mut Self::PointedTo) -> Self;
+ unsafe fn from_foreign(ptr: *mut c_void) -> Self;
/// Tries to convert a foreign-owned object back to a Rust-owned one.
///
@@ -72,7 +72,7 @@ pub unsafe trait ForeignOwnable: Sized {
/// `ptr` must either be null or satisfy the safety requirements for [`from_foreign`].
///
/// [`from_foreign`]: Self::from_foreign
- unsafe fn try_from_foreign(ptr: *mut Self::PointedTo) -> Option<Self> {
+ unsafe fn try_from_foreign(ptr: *mut c_void) -> Option<Self> {
if ptr.is_null() {
None
} else {
@@ -95,7 +95,7 @@ pub unsafe trait ForeignOwnable: Sized {
///
/// [`into_foreign`]: Self::into_foreign
/// [`from_foreign`]: Self::from_foreign
- unsafe fn borrow<'a>(ptr: *mut Self::PointedTo) -> Self::Borrowed<'a>;
+ unsafe fn borrow<'a>(ptr: *mut c_void) -> Self::Borrowed<'a>;
/// Borrows a foreign-owned object mutably.
///
@@ -123,23 +123,24 @@ pub unsafe trait ForeignOwnable: Sized {
/// [`from_foreign`]: Self::from_foreign
/// [`borrow`]: Self::borrow
/// [`Arc`]: crate::sync::Arc
- unsafe fn borrow_mut<'a>(ptr: *mut Self::PointedTo) -> Self::BorrowedMut<'a>;
+ unsafe fn borrow_mut<'a>(ptr: *mut c_void) -> Self::BorrowedMut<'a>;
}
-// SAFETY: The `into_foreign` function returns a pointer that is dangling, but well-aligned.
+// SAFETY: The pointer returned by `into_foreign` comes from a well-aligned
+// pointer to `()`.
unsafe impl ForeignOwnable for () {
- type PointedTo = ();
+ const FOREIGN_ALIGN: usize = core::mem::align_of::<()>();
type Borrowed<'a> = ();
type BorrowedMut<'a> = ();
- fn into_foreign(self) -> *mut Self::PointedTo {
+ fn into_foreign(self) -> *mut c_void {
core::ptr::NonNull::dangling().as_ptr()
}
- unsafe fn from_foreign(_: *mut Self::PointedTo) -> Self {}
+ unsafe fn from_foreign(_: *mut c_void) -> Self {}
- unsafe fn borrow<'a>(_: *mut Self::PointedTo) -> Self::Borrowed<'a> {}
- unsafe fn borrow_mut<'a>(_: *mut Self::PointedTo) -> Self::BorrowedMut<'a> {}
+ unsafe fn borrow<'a>(_: *mut c_void) -> Self::Borrowed<'a> {}
+ unsafe fn borrow_mut<'a>(_: *mut c_void) -> Self::BorrowedMut<'a> {}
}
/// Runs a cleanup function/closure when dropped.
@@ -366,7 +367,7 @@ impl<T> Opaque<T> {
// initialize the `T`.
unsafe {
pin_init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
- init_func(Self::raw_get(slot));
+ init_func(Self::cast_into(slot));
Ok(())
})
}
@@ -386,7 +387,7 @@ impl<T> Opaque<T> {
// SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
// initialize the `T`.
unsafe {
- pin_init::pin_init_from_closure::<_, E>(move |slot| init_func(Self::raw_get(slot)))
+ pin_init::pin_init_from_closure::<_, E>(move |slot| init_func(Self::cast_into(slot)))
}
}
@@ -399,9 +400,14 @@ impl<T> Opaque<T> {
///
/// This function is useful to get access to the value without creating intermediate
/// references.
- pub const fn raw_get(this: *const Self) -> *mut T {
+ pub const fn cast_into(this: *const Self) -> *mut T {
UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
}
+
+ /// The opposite operation of [`Opaque::cast_into`].
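+    ///
+    /// # Examples
+    ///
+    /// A round-trip sketch: casting into the inner `T` and back yields a
+    /// pointer to the original `Opaque<T>`.
+    ///
+    /// ```
+    /// use kernel::types::Opaque;
+    ///
+    /// let val = Opaque::new(42u8);
+    /// let inner: *mut u8 = Opaque::cast_into(&val);
+    /// let outer: *const Opaque<u8> = Opaque::cast_from(inner);
+    /// assert!(core::ptr::eq(outer, &val));
+    /// ```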
+ pub const fn cast_from(this: *const T) -> *const Self {
+ this.cast()
+ }
}
impl<T> Wrapper<T> for Opaque<T> {
@@ -417,173 +423,6 @@ impl<T> Wrapper<T> for Opaque<T> {
}
}
-/// Types that are _always_ reference counted.
-///
-/// It allows such types to define their own custom ref increment and decrement functions.
-/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
-/// [`ARef<T>`].
-///
-/// This is usually implemented by wrappers to existing structures on the C side of the code. For
-/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
-/// instances of a type.
-///
-/// # Safety
-///
-/// Implementers must ensure that increments to the reference count keep the object alive in memory
-/// at least until matching decrements are performed.
-///
-/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
-/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
-/// alive.)
-pub unsafe trait AlwaysRefCounted {
- /// Increments the reference count on the object.
- fn inc_ref(&self);
-
- /// Decrements the reference count on the object.
- ///
- /// Frees the object when the count reaches zero.
- ///
- /// # Safety
- ///
- /// Callers must ensure that there was a previous matching increment to the reference count,
- /// and that the object is no longer used after its reference count is decremented (as it may
- /// result in the object being freed), unless the caller owns another increment on the refcount
- /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
- /// [`AlwaysRefCounted::dec_ref`] once).
- unsafe fn dec_ref(obj: NonNull<Self>);
-}
-
-/// An owned reference to an always-reference-counted object.
-///
-/// The object's reference count is automatically decremented when an instance of [`ARef`] is
-/// dropped. It is also automatically incremented when a new instance is created via
-/// [`ARef::clone`].
-///
-/// # Invariants
-///
-/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
-/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
-pub struct ARef<T: AlwaysRefCounted> {
- ptr: NonNull<T>,
- _p: PhantomData<T>,
-}
-
-// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
-// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
-// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
-// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
-unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
-
-// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
-// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
-// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
-// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
-// example, when the reference count reaches zero and `T` is dropped.
-unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
-
-impl<T: AlwaysRefCounted> ARef<T> {
- /// Creates a new instance of [`ARef`].
- ///
- /// It takes over an increment of the reference count on the underlying object.
- ///
- /// # Safety
- ///
- /// Callers must ensure that the reference count was incremented at least once, and that they
- /// are properly relinquishing one increment. That is, if there is only one increment, callers
- /// must not use the underlying object anymore -- it is only safe to do so via the newly
- /// created [`ARef`].
- pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
- // INVARIANT: The safety requirements guarantee that the new instance now owns the
- // increment on the refcount.
- Self {
- ptr,
- _p: PhantomData,
- }
- }
-
- /// Consumes the `ARef`, returning a raw pointer.
- ///
- /// This function does not change the refcount. After calling this function, the caller is
- /// responsible for the refcount previously managed by the `ARef`.
- ///
- /// # Examples
- ///
- /// ```
- /// use core::ptr::NonNull;
- /// use kernel::types::{ARef, AlwaysRefCounted};
- ///
- /// struct Empty {}
- ///
- /// # // SAFETY: TODO.
- /// unsafe impl AlwaysRefCounted for Empty {
- /// fn inc_ref(&self) {}
- /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
- /// }
- ///
- /// let mut data = Empty {};
- /// let ptr = NonNull::<Empty>::new(&mut data).unwrap();
- /// # // SAFETY: TODO.
- /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
- /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
- ///
- /// assert_eq!(ptr, raw_ptr);
- /// ```
- pub fn into_raw(me: Self) -> NonNull<T> {
- ManuallyDrop::new(me).ptr
- }
-}
-
-impl<T: AlwaysRefCounted> Clone for ARef<T> {
- fn clone(&self) -> Self {
- self.inc_ref();
- // SAFETY: We just incremented the refcount above.
- unsafe { Self::from_raw(self.ptr) }
- }
-}
-
-impl<T: AlwaysRefCounted> Deref for ARef<T> {
- type Target = T;
-
- fn deref(&self) -> &Self::Target {
- // SAFETY: The type invariants guarantee that the object is valid.
- unsafe { self.ptr.as_ref() }
- }
-}
-
-impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
- fn from(b: &T) -> Self {
- b.inc_ref();
- // SAFETY: We just incremented the refcount above.
- unsafe { Self::from_raw(NonNull::from(b)) }
- }
-}
-
-impl<T: AlwaysRefCounted> Drop for ARef<T> {
- fn drop(&mut self) {
- // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
- // decrement.
- unsafe { T::dec_ref(self.ptr) };
- }
-}
-
-/// A sum type that always holds either a value of type `L` or `R`.
-///
-/// # Examples
-///
-/// ```
-/// use kernel::types::Either;
-///
-/// let left_value: Either<i32, &str> = Either::Left(7);
-/// let right_value: Either<i32, &str> = Either::Right("right value");
-/// ```
-pub enum Either<L, R> {
- /// Constructs an instance of [`Either`] containing a value of type `L`.
- Left(L),
-
- /// Constructs an instance of [`Either`] containing a value of type `R`.
- Right(R),
-}
-
/// Zero-sized type to mark types not [`Send`].
///
/// Add this type as a field to your struct if your type should not be sent to a different task.
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
index 6d70edd8086a..a8fb4764185a 100644
--- a/rust/kernel/uaccess.rs
+++ b/rust/kernel/uaccess.rs
@@ -8,14 +8,57 @@ use crate::{
alloc::{Allocator, Flags},
bindings,
error::Result,
- ffi::c_void,
+ ffi::{c_char, c_void},
prelude::*,
transmute::{AsBytes, FromBytes},
};
use core::mem::{size_of, MaybeUninit};
-/// The type used for userspace addresses.
-pub type UserPtr = usize;
+/// A pointer into userspace.
+///
+/// This is the Rust equivalent to C pointers tagged with `__user`.
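+///
+/// # Examples
+///
+/// A minimal sketch (the address below is purely illustrative):
+///
+/// ```
+/// use kernel::uaccess::UserPtr;
+///
+/// let uptr = UserPtr::from_addr(0x1000);
+/// let next = uptr.wrapping_byte_add(8); // 8 bytes further into the buffer
+/// assert_eq!(next.as_const_ptr() as usize, 0x1008);
+/// ```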
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct UserPtr(*mut c_void);
+
+impl UserPtr {
+ /// Create a `UserPtr` from an integer representing the userspace address.
+ #[inline]
+ pub fn from_addr(addr: usize) -> Self {
+ Self(addr as *mut c_void)
+ }
+
+ /// Create a `UserPtr` from a pointer representing the userspace address.
+ #[inline]
+ pub fn from_ptr(addr: *mut c_void) -> Self {
+ Self(addr)
+ }
+
+ /// Cast this userspace pointer to a raw const void pointer.
+ ///
+ /// It is up to the caller to use the returned pointer correctly.
+ #[inline]
+ pub fn as_const_ptr(self) -> *const c_void {
+ self.0
+ }
+
+ /// Cast this userspace pointer to a raw mutable void pointer.
+ ///
+ /// It is up to the caller to use the returned pointer correctly.
+ #[inline]
+ pub fn as_mut_ptr(self) -> *mut c_void {
+ self.0
+ }
+
+ /// Increment this user pointer by `add` bytes.
+ ///
+ /// This addition is wrapping, so wrapping around the address space does not result in a panic
+ /// even if `CONFIG_RUST_OVERFLOW_CHECKS` is enabled.
+ #[inline]
+ pub fn wrapping_byte_add(self, add: usize) -> UserPtr {
+ UserPtr(self.0.wrapping_byte_add(add))
+ }
+}
/// A pointer to an area in userspace memory, which can be either read-only or read-write.
///
@@ -177,7 +220,7 @@ impl UserSliceReader {
pub fn skip(&mut self, num_skip: usize) -> Result {
// Update `self.length` first since that's the fallible part of this operation.
self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
- self.ptr = self.ptr.wrapping_add(num_skip);
+ self.ptr = self.ptr.wrapping_byte_add(num_skip);
Ok(())
}
@@ -224,11 +267,11 @@ impl UserSliceReader {
}
// SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
// that many bytes to it.
- let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) };
+ let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr.as_const_ptr(), len) };
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
@@ -240,7 +283,7 @@ impl UserSliceReader {
pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
// SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
// `out`.
- let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ let out = unsafe { &mut *(core::ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
self.read_raw(out)
}
@@ -262,14 +305,14 @@ impl UserSliceReader {
let res = unsafe {
bindings::_copy_from_user(
out.as_mut_ptr().cast::<c_void>(),
- self.ptr as *const c_void,
+ self.ptr.as_const_ptr(),
len,
)
};
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
// SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
// `FromBytes`, any bit-pattern is a valid value for this type.
@@ -291,6 +334,65 @@ impl UserSliceReader {
unsafe { buf.inc_len(len) };
Ok(())
}
+
+ /// Read a NUL-terminated string from userspace and return it.
+ ///
+ /// The string is read into `buf` and a NUL-terminator is added if the end of `buf` is reached.
+ /// Since there must be space to add a NUL-terminator, the buffer must not be empty. The
+ /// returned `&CStr` points into `buf`.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address (some data may have been
+ /// copied).
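+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (`read_tag` is a hypothetical helper; the reader is
+    /// assumed to cover a user buffer holding a C string):
+    ///
+    /// ```
+    /// use kernel::prelude::*;
+    /// use kernel::uaccess::UserSliceReader;
+    ///
+    /// fn read_tag(reader: UserSliceReader) -> Result {
+    ///     let mut buf = [0u8; 64];
+    ///     let tag = reader.strcpy_into_buf(&mut buf)?;
+    ///     pr_info!("tag: {}\n", tag);
+    ///     Ok(())
+    /// }
+    /// # let reader = kernel::uaccess::UserSlice::new(kernel::uaccess::UserPtr::from_addr(0), 0).reader();
+    /// # let _ = read_tag(reader);
+    /// ```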
+ #[doc(alias = "strncpy_from_user")]
+ pub fn strcpy_into_buf<'buf>(self, buf: &'buf mut [u8]) -> Result<&'buf CStr> {
+ if buf.is_empty() {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: The types are compatible and `strncpy_from_user` doesn't write uninitialized
+ // bytes to `buf`.
+ let mut dst = unsafe { &mut *(core::ptr::from_mut(buf) as *mut [MaybeUninit<u8>]) };
+
+ // We never read more than `self.length` bytes.
+ if dst.len() > self.length {
+ dst = &mut dst[..self.length];
+ }
+
+ let mut len = raw_strncpy_from_user(dst, self.ptr)?;
+ if len < dst.len() {
+ // Add one to include the NUL-terminator.
+ len += 1;
+ } else if len < buf.len() {
+ // This implies that `len == dst.len() < buf.len()`.
+ //
+ // This means that we could not fill the entire buffer, but we had to stop reading
+ // because we hit the `self.length` limit of this `UserSliceReader`. Since we did not
+ // fill the buffer, we treat this case as if we tried to read past the `self.length`
+ // limit and received a page fault, which is consistent with other `UserSliceReader`
+ // methods that also return page faults when you exceed `self.length`.
+ return Err(EFAULT);
+ } else {
+ // This implies that `len == buf.len()`.
+ //
+ // This means that we filled the buffer exactly. In this case, we add a NUL-terminator
+ // and return it. Unlike the `len < dst.len()` branch, don't modify `len` because it
+ // already represents the length including the NUL-terminator.
+ //
+ // SAFETY: Due to the check at the beginning, the buffer is not empty.
+ unsafe { *buf.last_mut().unwrap_unchecked() = 0 };
+ }
+
+ // This method consumes `self`, so it can only be called once, thus we do not need to
+ // update `self.length`. This sidesteps concerns such as whether `self.length` should be
+ // incremented by `len` or `len-1` in the `len == buf.len()` case.
+
+ // SAFETY: There are two cases:
+ // * If we hit the `len < dst.len()` case, then `raw_strncpy_from_user` guarantees that
+ // this slice contains exactly one NUL byte at the end of the string.
+ // * Otherwise, `raw_strncpy_from_user` guarantees that the string contained no NUL bytes,
+ // and we have since added a NUL byte at the end.
+ Ok(unsafe { CStr::from_bytes_with_nul_unchecked(&buf[..len]) })
+ }
}
/// A writer for [`UserSlice`].
@@ -327,11 +429,11 @@ impl UserSliceWriter {
}
// SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read
// that many bytes from it.
- let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) };
+ let res = unsafe { bindings::copy_to_user(self.ptr.as_mut_ptr(), data_ptr, len) };
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
@@ -354,16 +456,53 @@ impl UserSliceWriter {
// is a compile-time constant.
let res = unsafe {
bindings::_copy_to_user(
- self.ptr as *mut c_void,
- (value as *const T).cast::<c_void>(),
+ self.ptr.as_mut_ptr(),
+ core::ptr::from_ref(value).cast::<c_void>(),
len,
)
};
if res != 0 {
return Err(EFAULT);
}
- self.ptr = self.ptr.wrapping_add(len);
+ self.ptr = self.ptr.wrapping_byte_add(len);
self.length -= len;
Ok(())
}
}
+
+/// Reads a NUL-terminated string into `dst` and returns the length.
+///
+/// This reads from userspace until a NUL byte is encountered, or until `dst.len()` bytes have been
+/// read. Fails with [`EFAULT`] if a read happens on a bad address (some data may have been
+/// copied). When the end of the buffer is encountered, no NUL byte is added, so the string is
+/// *not* guaranteed to be NUL-terminated when `Ok(dst.len())` is returned.
+///
+/// # Guarantees
+///
+/// When this function returns `Ok(len)`, it is guaranteed that the first `len` bytes of `dst` are
+/// initialized and non-zero. Furthermore, if `len < dst.len()`, then `dst[len]` is a NUL byte.
+#[inline]
+fn raw_strncpy_from_user(dst: &mut [MaybeUninit<u8>], src: UserPtr) -> Result<usize> {
+ // CAST: Slice lengths are guaranteed to be `<= isize::MAX`.
+ let len = dst.len() as isize;
+
+ // SAFETY: `dst` is valid for writing `dst.len()` bytes.
+ let res = unsafe {
+ bindings::strncpy_from_user(
+ dst.as_mut_ptr().cast::<c_char>(),
+ src.as_const_ptr().cast::<c_char>(),
+ len,
+ )
+ };
+
+ if res < 0 {
+ return Err(Error::from_errno(res as i32));
+ }
+
+ #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
+ assert!(res <= len);
+
+ // GUARANTEES: `strncpy_from_user` was successful, so `dst` has contents in accordance with the
+ // guarantees of this function.
+ Ok(res as usize)
+}
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index d092112d843f..b9343d5bc00f 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -26,7 +26,7 @@
//! * The [`WorkItemPointer`] trait is implemented for the pointer type that points at a something
//! that implements [`WorkItem`].
//!
-//! ## Example
+//! ## Examples
//!
//! This example defines a struct that holds an integer and can be scheduled on the workqueue. When
//! the struct is executed, it will print the integer. Since there is only one `work_struct` field,
@@ -131,10 +131,69 @@
//! # print_2_later(MyStruct::new(41, 42).unwrap());
//! ```
//!
+//! This example shows how you can schedule delayed work items:
+//!
+//! ```
+//! use kernel::sync::Arc;
+//! use kernel::workqueue::{self, impl_has_delayed_work, new_delayed_work, DelayedWork, WorkItem};
+//!
+//! #[pin_data]
+//! struct MyStruct {
+//! value: i32,
+//! #[pin]
+//! work: DelayedWork<MyStruct>,
+//! }
+//!
+//! impl_has_delayed_work! {
+//! impl HasDelayedWork<Self> for MyStruct { self.work }
+//! }
+//!
+//! impl MyStruct {
+//! fn new(value: i32) -> Result<Arc<Self>> {
+//! Arc::pin_init(
+//! pin_init!(MyStruct {
+//! value,
+//! work <- new_delayed_work!("MyStruct::work"),
+//! }),
+//! GFP_KERNEL,
+//! )
+//! }
+//! }
+//!
+//! impl WorkItem for MyStruct {
+//! type Pointer = Arc<MyStruct>;
+//!
+//! fn run(this: Arc<MyStruct>) {
+//! pr_info!("The value is: {}\n", this.value);
+//! }
+//! }
+//!
+//! /// This method will enqueue the struct for execution on the system workqueue, where its value
+//! /// will be printed 12 jiffies later.
+//! fn print_later(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue_delayed(val, 12);
+//! }
+//!
+//! /// It is also possible to use the ordinary `enqueue` method together with `DelayedWork`. This
+//! /// is equivalent to calling `enqueue_delayed` with a delay of zero.
+//! fn print_now(val: Arc<MyStruct>) {
+//! let _ = workqueue::system().enqueue(val);
+//! }
+//! # print_later(MyStruct::new(42).unwrap());
+//! # print_now(MyStruct::new(42).unwrap());
+//! ```
+//!
//! C header: [`include/linux/workqueue.h`](srctree/include/linux/workqueue.h)
-use crate::alloc::{AllocError, Flags};
-use crate::{prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
+use crate::{
+ alloc::{AllocError, Flags},
+ container_of,
+ prelude::*,
+ sync::Arc,
+ sync::LockClassKey,
+ time::Jiffies,
+ types::Opaque,
+};
use core::marker::PhantomData;
/// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
@@ -146,6 +205,33 @@ macro_rules! new_work {
}
pub use new_work;
+/// Creates a [`DelayedWork`] initialiser with the given name and a newly-created lock class.
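+///
+/// A short sketch of both forms as they would appear inside `pin_init!` (illustrative; see the
+/// module-level example in this file for full context):
+///
+/// ```ignore
+/// // Name derived from the current file and line:
+/// work <- new_delayed_work!(),
+/// // Explicit name; the underlying timer is named with a `_timer` suffix:
+/// work <- new_delayed_work!("MyStruct::work"),
+/// ```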
+#[macro_export]
+macro_rules! new_delayed_work {
+ () => {
+ $crate::workqueue::DelayedWork::new(
+ $crate::optional_name!(),
+ $crate::static_lock_class!(),
+ $crate::c_str!(::core::concat!(
+ ::core::file!(),
+ ":",
+ ::core::line!(),
+ "_timer"
+ )),
+ $crate::static_lock_class!(),
+ )
+ };
+ ($name:literal) => {
+ $crate::workqueue::DelayedWork::new(
+ $crate::c_str!($name),
+ $crate::static_lock_class!(),
+ $crate::c_str!(::core::concat!($name, "_timer")),
+ $crate::static_lock_class!(),
+ )
+ };
+}
+pub use new_delayed_work;
+
/// A kernel work queue.
///
/// Wraps the kernel's C `struct workqueue_struct`.
@@ -170,7 +256,7 @@ impl Queue {
pub unsafe fn from_raw<'a>(ptr: *const bindings::workqueue_struct) -> &'a Queue {
// SAFETY: The `Queue` type is `#[repr(transparent)]`, so the pointer cast is valid. The
// caller promises that the pointer is not dangling.
- unsafe { &*(ptr as *const Queue) }
+ unsafe { &*ptr.cast::<Queue>() }
}
/// Enqueues a work item.
@@ -198,7 +284,7 @@ impl Queue {
unsafe {
w.__enqueue(move |work_ptr| {
bindings::queue_work_on(
- bindings::wq_misc_consts_WORK_CPU_UNBOUND as _,
+ bindings::wq_misc_consts_WORK_CPU_UNBOUND as ffi::c_int,
queue_ptr,
work_ptr,
)
@@ -206,6 +292,42 @@ impl Queue {
}
}
+ /// Enqueues a delayed work item.
+ ///
+ /// This may fail if the work item is already enqueued in a workqueue.
+ ///
+ /// The work item will be submitted using `WORK_CPU_UNBOUND`.
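+ ///
+ /// The `delay` is given in jiffies. A hedged sketch of scheduling with a millisecond-based
+ /// delay (assuming `val` implements the required work item traits, and assuming
+ /// [`crate::time::msecs_to_jiffies`] for the conversion):
+ ///
+ /// ```ignore
+ /// let delay = kernel::time::msecs_to_jiffies(100);
+ /// let _ = kernel::workqueue::system().enqueue_delayed(val, delay);
+ /// ```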
+ pub fn enqueue_delayed<W, const ID: u64>(&self, w: W, delay: Jiffies) -> W::EnqueueOutput
+ where
+ W: RawDelayedWorkItem<ID> + Send + 'static,
+ {
+ let queue_ptr = self.0.get();
+
+ // SAFETY: We only return `false` if the `work_struct` is already in a workqueue. The other
+ // `__enqueue` requirements are not relevant since `W` is `Send` and static.
+ //
+ // The call to `bindings::queue_delayed_work_on` will dereference the provided raw pointer,
+ // which is ok because `__enqueue` guarantees that the pointer is valid for the duration of
+ // this closure, and the safety requirements of `RawDelayedWorkItem` expand this
+ // requirement to apply to the entire `delayed_work`.
+ //
+ // Furthermore, if the C workqueue code accesses the pointer after this call to
+ // `__enqueue`, then the work item was successfully enqueued, and
+ // `bindings::queue_delayed_work_on` will have returned true. In this case, `__enqueue`
+ // promises that the raw pointer will stay valid until we call the function pointer in the
+ // `work_struct`, so the access is ok.
+ unsafe {
+ w.__enqueue(move |work_ptr| {
+ bindings::queue_delayed_work_on(
+ bindings::wq_misc_consts_WORK_CPU_UNBOUND as ffi::c_int,
+ queue_ptr,
+ container_of!(work_ptr, bindings::delayed_work, work),
+ delay,
+ )
+ })
+ }
+ }
+
/// Tries to spawn the given function or closure as a work item.
///
/// This method can fail because it allocates memory to store the work item.
@@ -298,6 +420,16 @@ pub unsafe trait RawWorkItem<const ID: u64> {
F: FnOnce(*mut bindings::work_struct) -> bool;
}
+/// A raw delayed work item.
+///
+/// # Safety
+///
+/// If the `__enqueue` method in the `RawWorkItem` implementation calls the closure, then the
+/// provided pointer must point at the `work` field of a valid `delayed_work`, and the guarantees
+/// that `__enqueue` provides about accessing the `work_struct` must also apply to the rest of the
+/// `delayed_work` struct.
+pub unsafe trait RawDelayedWorkItem<const ID: u64>: RawWorkItem<ID> {}
+
/// Defines the method that should be called directly when a work item is executed.
///
/// This trait is implemented by `Pin<KBox<T>>` and [`Arc<T>`], and is mainly intended to be
@@ -403,11 +535,11 @@ impl<T: ?Sized, const ID: u64> Work<T, ID> {
//
// A pointer cast would also be ok due to `#[repr(transparent)]`. We use `addr_of!` so that
// the compiler does not complain that the `work` field is unused.
- unsafe { Opaque::raw_get(core::ptr::addr_of!((*ptr).work)) }
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*ptr).work)) }
}
}
-/// Declares that a type has a [`Work<T, ID>`] field.
+/// Declares that a type contains a [`Work<T, ID>`].
///
/// The intended way of using this trait is via the [`impl_has_work!`] macro. You can use the macro
/// like this:
@@ -506,6 +638,178 @@ impl_has_work! {
impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
+/// Links for a delayed work item.
+///
+/// This struct contains a function pointer to the [`run`] function from the [`WorkItemPointer`]
+/// trait, and defines the linked list pointers necessary to enqueue a work item in a workqueue in
+/// a delayed manner.
+///
+/// Wraps the kernel's C `struct delayed_work`.
+///
+/// This is a helper type used to associate a `delayed_work` with the [`WorkItem`] that uses it.
+///
+/// [`run`]: WorkItemPointer::run
+#[pin_data]
+#[repr(transparent)]
+pub struct DelayedWork<T: ?Sized, const ID: u64 = 0> {
+ #[pin]
+ dwork: Opaque<bindings::delayed_work>,
+ _inner: PhantomData<T>,
+}
+
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Send for DelayedWork<T, ID> {}
+// SAFETY: Kernel work items are usable from any thread.
+//
+// We do not need to constrain `T` since the work item does not actually contain a `T`.
+unsafe impl<T: ?Sized, const ID: u64> Sync for DelayedWork<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> DelayedWork<T, ID> {
+ /// Creates a new instance of [`DelayedWork`].
+ #[inline]
+ pub fn new(
+ work_name: &'static CStr,
+ work_key: Pin<&'static LockClassKey>,
+ timer_name: &'static CStr,
+ timer_key: Pin<&'static LockClassKey>,
+ ) -> impl PinInit<Self>
+ where
+ T: WorkItem<ID>,
+ {
+ pin_init!(Self {
+ dwork <- Opaque::ffi_init(|slot: *mut bindings::delayed_work| {
+ // SAFETY: The `WorkItemPointer` implementation promises that `run` can be used as
+ // the work item function.
+ unsafe {
+ bindings::init_work_with_key(
+ core::ptr::addr_of_mut!((*slot).work),
+ Some(T::Pointer::run),
+ false,
+ work_name.as_char_ptr(),
+ work_key.as_ptr(),
+ )
+ }
+
+ // SAFETY: The `delayed_work_timer_fn` function pointer can be used here because
+ // the timer is embedded in a `struct delayed_work`, and only ever scheduled via
+ // the core workqueue code, and configured to run in irqsafe context.
+ unsafe {
+ bindings::timer_init_key(
+ core::ptr::addr_of_mut!((*slot).timer),
+ Some(bindings::delayed_work_timer_fn),
+ bindings::TIMER_IRQSAFE,
+ timer_name.as_char_ptr(),
+ timer_key.as_ptr(),
+ )
+ }
+ }),
+ _inner: PhantomData,
+ })
+ }
+
+ /// Get a pointer to the [`Work`] inside the `delayed_work`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must not be dangling and must be properly aligned. (But the memory
+ /// need not be initialized.)
+ #[inline]
+ pub unsafe fn raw_as_work(ptr: *const Self) -> *mut Work<T, ID> {
+ // SAFETY: The caller promises that the pointer is aligned and not dangling.
+ let dw: *mut bindings::delayed_work =
+ unsafe { Opaque::cast_into(core::ptr::addr_of!((*ptr).dwork)) };
+ // SAFETY: The caller promises that the pointer is aligned and not dangling.
+ let wrk: *mut bindings::work_struct = unsafe { core::ptr::addr_of_mut!((*dw).work) };
+ // CAST: Work and work_struct have compatible layouts.
+ wrk.cast()
+ }
+}
+
+/// Declares that a type contains a [`DelayedWork<T, ID>`].
+///
+/// # Safety
+///
+/// The `HasWork<T, ID>` implementation must return a `work_struct` that is stored in the `work`
+/// field of a `delayed_work`, and the rest of that `delayed_work` must have the same access rules
+/// as its `work` field.
+pub unsafe trait HasDelayedWork<T, const ID: u64 = 0>: HasWork<T, ID> {}
+
+/// Used to safely implement the [`HasDelayedWork<T, ID>`] trait.
+///
+/// This macro also implements the [`HasWork`] trait, so you do not need to use [`impl_has_work!`]
+/// when using this macro.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::Arc;
+/// use kernel::workqueue::{self, impl_has_delayed_work, DelayedWork};
+///
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: DelayedWork<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
+/// }
+///
+/// impl_has_delayed_work! {
+/// impl{'a, T, const N: usize} HasDelayedWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
+/// }
+/// ```
+#[macro_export]
+macro_rules! impl_has_delayed_work {
+ ($(impl$({$($generics:tt)*})?
+ HasDelayedWork<$work_type:ty $(, $id:tt)?>
+ for $self:ty
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
+ // type.
+ unsafe impl$(<$($generics)+>)?
+ $crate::workqueue::HasDelayedWork<$work_type $(, $id)?> for $self {}
+
+ // SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
+ // type.
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
+ #[inline]
+ unsafe fn raw_get_work(
+ ptr: *mut Self
+ ) -> *mut $crate::workqueue::Work<$work_type $(, $id)?> {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::workqueue::DelayedWork<$work_type $(, $id)?> = unsafe {
+ ::core::ptr::addr_of_mut!((*ptr).$field)
+ };
+
+ // SAFETY: The caller promises that the pointer is not dangling.
+ unsafe { $crate::workqueue::DelayedWork::raw_as_work(ptr) }
+ }
+
+ #[inline]
+ unsafe fn work_container_of(
+ ptr: *mut $crate::workqueue::Work<$work_type $(, $id)?>,
+ ) -> *mut Self {
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ let ptr = unsafe { $crate::workqueue::Work::raw_get(ptr) };
+
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ let delayed_work = unsafe {
+ $crate::container_of!(ptr, $crate::bindings::delayed_work, work)
+ };
+
+ let delayed_work: *mut $crate::workqueue::DelayedWork<$work_type $(, $id)?> =
+ delayed_work.cast();
+
+ // SAFETY: The caller promises that the pointer points at a field of the right type
+ // in the right kind of struct.
+ unsafe { $crate::container_of!(delayed_work, Self, $field) }
+ }
+ }
+ )*};
+}
+pub use impl_has_delayed_work;
+
// SAFETY: The `__enqueue` implementation in RawWorkItem uses a `work_struct` initialized with the
// `run` method of this trait as the function pointer because:
// - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
@@ -522,7 +826,7 @@ where
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
// The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
- let ptr = ptr as *mut Work<T, ID>;
+ let ptr = ptr.cast::<Work<T, ID>>();
// SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
// SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
@@ -567,6 +871,16 @@ where
}
}
+// SAFETY: By the safety requirements of `HasDelayedWork`, the `work_struct` returned by methods
+// in `HasWork` is the `work` field of a `delayed_work`, and the rest of the `delayed_work` has
+// the same access rules as its `work` field.
+unsafe impl<T, const ID: u64> RawDelayedWorkItem<ID> for Arc<T>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasDelayedWork<T, ID>,
+{
+}
+
// SAFETY: TODO.
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<KBox<T>>
where
@@ -575,7 +889,7 @@ where
{
unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
// The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
- let ptr = ptr as *mut Work<T, ID>;
+ let ptr = ptr.cast::<Work<T, ID>>();
// SAFETY: This computes the pointer that `__enqueue` got from `KBox::into_raw`.
let ptr = unsafe { T::work_container_of(ptr) };
// SAFETY: This pointer comes from `KBox::into_raw` and we've been given back ownership.
@@ -617,6 +931,16 @@ where
}
}
+// SAFETY: By the safety requirements of `HasDelayedWork`, the `work_struct` returned by methods
+// in `HasWork` is the `work` field of a `delayed_work`, and the rest of the `delayed_work` has
+// the same access rules as its `work` field.
+unsafe impl<T, const ID: u64> RawDelayedWorkItem<ID> for Pin<KBox<T>>
+where
+ T: WorkItem<ID, Pointer = Self>,
+ T: HasDelayedWork<T, ID>,
+{
+}
+
/// Returns the system work queue (`system_wq`).
///
/// It is the one used by `schedule[_delayed]_work[_on]()`. Multi-CPU multi-threaded. There are
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
index 75719e7bb491..a49d6db28845 100644
--- a/rust/kernel/xarray.rs
+++ b/rust/kernel/xarray.rs
@@ -7,9 +7,10 @@
use crate::{
alloc, bindings, build_assert,
error::{Error, Result},
+ ffi::c_void,
types::{ForeignOwnable, NotThreadSafe, Opaque},
};
-use core::{iter, marker::PhantomData, mem, pin::Pin, ptr::NonNull};
+use core::{iter, marker::PhantomData, pin::Pin, ptr::NonNull};
use pin_init::{pin_data, pin_init, pinned_drop, PinInit};
/// An array which efficiently maps sparse integer indices to owned objects.
@@ -101,7 +102,7 @@ impl<T: ForeignOwnable> XArray<T> {
})
}
- fn iter(&self) -> impl Iterator<Item = NonNull<T::PointedTo>> + '_ {
+ fn iter(&self) -> impl Iterator<Item = NonNull<c_void>> + '_ {
let mut index = 0;
// SAFETY: `self.xa` is always valid by the type invariant.
@@ -179,7 +180,7 @@ impl<T> From<StoreError<T>> for Error {
impl<'a, T: ForeignOwnable> Guard<'a, T> {
fn load<F, U>(&self, index: usize, f: F) -> Option<U>
where
- F: FnOnce(NonNull<T::PointedTo>) -> U,
+ F: FnOnce(NonNull<c_void>) -> U,
{
// SAFETY: `self.xa.xa` is always valid by the type invariant.
let ptr = unsafe { bindings::xa_load(self.xa.xa.get(), index) };
@@ -230,7 +231,7 @@ impl<'a, T: ForeignOwnable> Guard<'a, T> {
gfp: alloc::Flags,
) -> Result<Option<T>, StoreError<T>> {
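+        // XArray entries use their two low bits for internal tagging, so stored pointers must
+        // keep those bits clear; hence the 4-byte minimum alignment checked below.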
build_assert!(
- mem::align_of::<T::PointedTo>() >= 4,
+ T::FOREIGN_ALIGN >= 4,
"pointers stored in XArray must be 4-byte aligned"
);
let new = value.into_foreign();
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 75efc6eeeafc..5ee54a00c0b6 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -94,7 +94,6 @@ struct ModuleInfo {
type_: String,
license: String,
name: String,
- author: Option<String>,
authors: Option<Vec<String>>,
description: Option<String>,
alias: Option<Vec<String>>,
@@ -108,7 +107,6 @@ impl ModuleInfo {
const EXPECTED_KEYS: &[&str] = &[
"type",
"name",
- "author",
"authors",
"description",
"license",
@@ -134,7 +132,6 @@ impl ModuleInfo {
match key.as_str() {
"type" => info.type_ = expect_ident(it),
"name" => info.name = expect_string_ascii(it),
- "author" => info.author = Some(expect_string(it)),
"authors" => info.authors = Some(expect_string_array(it)),
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
@@ -179,9 +176,6 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
// Rust does not allow hyphens in identifiers, use underscore instead.
let ident = info.name.replace('-', "_");
let mut modinfo = ModInfoBuilder::new(ident.as_ref());
- if let Some(author) = info.author {
- modinfo.emit("author", &author);
- }
if let Some(authors) = info.authors {
for author in authors {
modinfo.emit("author", &author);
diff --git a/rust/pin-init/README.md b/rust/pin-init/README.md
index 2d0cda961d45..a4c01a8d78b2 100644
--- a/rust/pin-init/README.md
+++ b/rust/pin-init/README.md
@@ -125,7 +125,7 @@ impl DriverData {
fn new() -> impl PinInit<Self, Error> {
try_pin_init!(Self {
status <- CMutex::new(0),
- buffer: Box::init(pin_init::zeroed())?,
+ buffer: Box::init(pin_init::init_zeroed())?,
}? Error)
}
}
diff --git a/rust/pin-init/examples/big_struct_in_place.rs b/rust/pin-init/examples/big_struct_in_place.rs
index 30d44a334ffd..c05139927486 100644
--- a/rust/pin-init/examples/big_struct_in_place.rs
+++ b/rust/pin-init/examples/big_struct_in_place.rs
@@ -4,6 +4,7 @@ use pin_init::*;
// Struct with size over 1GiB
#[derive(Debug)]
+#[allow(dead_code)]
pub struct BigStruct {
buf: [u8; 1024 * 1024 * 1024],
a: u64,
@@ -20,20 +21,23 @@ pub struct ManagedBuf {
impl ManagedBuf {
pub fn new() -> impl Init<Self> {
- init!(ManagedBuf { buf <- zeroed() })
+ init!(ManagedBuf { buf <- init_zeroed() })
}
}
fn main() {
- // we want to initialize the struct in-place, otherwise we would get a stackoverflow
- let buf: Box<BigStruct> = Box::init(init!(BigStruct {
- buf <- zeroed(),
- a: 7,
- b: 186,
- c: 7789,
- d: 34,
- managed_buf <- ManagedBuf::new(),
- }))
- .unwrap();
- println!("{}", core::mem::size_of_val(&*buf));
+ #[cfg(any(feature = "std", feature = "alloc"))]
+ {
+ // we want to initialize the struct in-place, otherwise we would get a stack overflow
+ let buf: Box<BigStruct> = Box::init(init!(BigStruct {
+ buf <- init_zeroed(),
+ a: 7,
+ b: 186,
+ c: 7789,
+ d: 34,
+ managed_buf <- ManagedBuf::new(),
+ }))
+ .unwrap();
+ println!("{}", core::mem::size_of_val(&*buf));
+ }
}
diff --git a/rust/pin-init/examples/linked_list.rs b/rust/pin-init/examples/linked_list.rs
index 0bbc7b8d83a1..f9e117c7dfe0 100644
--- a/rust/pin-init/examples/linked_list.rs
+++ b/rust/pin-init/examples/linked_list.rs
@@ -14,8 +14,9 @@ use core::{
use pin_init::*;
-#[expect(unused_attributes)]
+#[allow(unused_attributes)]
mod error;
+#[allow(unused_imports)]
use error::Error;
#[pin_data(PinnedDrop)]
@@ -39,6 +40,7 @@ impl ListHead {
}
#[inline]
+ #[allow(dead_code)]
pub fn insert_next(list: &ListHead) -> impl PinInit<Self, Infallible> + '_ {
try_pin_init!(&this in Self {
prev: list.next.prev().replace(unsafe { Link::new_unchecked(this)}),
@@ -112,6 +114,7 @@ impl Link {
}
#[inline]
+ #[allow(dead_code)]
fn prev(&self) -> &Link {
unsafe { &(*self.0.get().as_ptr()).prev }
}
@@ -138,7 +141,12 @@ impl Link {
}
#[allow(dead_code)]
+#[cfg(not(any(feature = "std", feature = "alloc")))]
+fn main() {}
+
+#[allow(dead_code)]
#[cfg_attr(test, test)]
+#[cfg(any(feature = "std", feature = "alloc"))]
fn main() -> Result<(), Error> {
let a = Box::pin_init(ListHead::new())?;
stack_pin_init!(let b = ListHead::insert_next(&a));
diff --git a/rust/pin-init/examples/mutex.rs b/rust/pin-init/examples/mutex.rs
index 3e3630780c96..9f295226cd64 100644
--- a/rust/pin-init/examples/mutex.rs
+++ b/rust/pin-init/examples/mutex.rs
@@ -12,14 +12,15 @@ use core::{
pin::Pin,
sync::atomic::{AtomicBool, Ordering},
};
+#[cfg(feature = "std")]
use std::{
sync::Arc,
- thread::{self, park, sleep, Builder, Thread},
+ thread::{self, sleep, Builder, Thread},
time::Duration,
};
use pin_init::*;
-#[expect(unused_attributes)]
+#[allow(unused_attributes)]
#[path = "./linked_list.rs"]
pub mod linked_list;
use linked_list::*;
@@ -36,6 +37,7 @@ impl SpinLock {
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
+ #[cfg(feature = "std")]
while self.inner.load(Ordering::Relaxed) {
thread::yield_now();
}
@@ -94,7 +96,8 @@ impl<T> CMutex<T> {
// println!("wait list length: {}", self.wait_list.size());
while self.locked.get() {
drop(sguard);
- park();
+ #[cfg(feature = "std")]
+ thread::park();
sguard = self.spin_lock.acquire();
}
// This does have an effect, as the ListHead inside wait_entry implements Drop!
@@ -131,8 +134,11 @@ impl<T> Drop for CMutexGuard<'_, T> {
let sguard = self.mtx.spin_lock.acquire();
self.mtx.locked.set(false);
if let Some(list_field) = self.mtx.wait_list.next() {
- let wait_entry = list_field.as_ptr().cast::<WaitEntry>();
- unsafe { (*wait_entry).thread.unpark() };
+ let _wait_entry = list_field.as_ptr().cast::<WaitEntry>();
+ #[cfg(feature = "std")]
+ unsafe {
+ (*_wait_entry).thread.unpark()
+ };
}
drop(sguard);
}
@@ -159,52 +165,61 @@ impl<T> DerefMut for CMutexGuard<'_, T> {
struct WaitEntry {
#[pin]
wait_list: ListHead,
+ #[cfg(feature = "std")]
thread: Thread,
}
impl WaitEntry {
#[inline]
fn insert_new(list: &ListHead) -> impl PinInit<Self> + '_ {
- pin_init!(Self {
- thread: thread::current(),
- wait_list <- ListHead::insert_prev(list),
- })
+ #[cfg(feature = "std")]
+ {
+ pin_init!(Self {
+ thread: thread::current(),
+ wait_list <- ListHead::insert_prev(list),
+ })
+ }
+ #[cfg(not(feature = "std"))]
+ {
+ pin_init!(Self {
+ wait_list <- ListHead::insert_prev(list),
+ })
+ }
}
}
-#[cfg(not(any(feature = "std", feature = "alloc")))]
-fn main() {}
-
-#[allow(dead_code)]
#[cfg_attr(test, test)]
-#[cfg(any(feature = "std", feature = "alloc"))]
+#[allow(dead_code)]
fn main() {
- let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
- let mut handles = vec![];
- let thread_count = 20;
- let workload = if cfg!(miri) { 100 } else { 1_000 };
- for i in 0..thread_count {
- let mtx = mtx.clone();
- handles.push(
- Builder::new()
- .name(format!("worker #{i}"))
- .spawn(move || {
- for _ in 0..workload {
- *mtx.lock() += 1;
- }
- println!("{i} halfway");
- sleep(Duration::from_millis((i as u64) * 10));
- for _ in 0..workload {
- *mtx.lock() += 1;
- }
- println!("{i} finished");
- })
- .expect("should not fail"),
- );
- }
- for h in handles {
- h.join().expect("thread panicked");
+ #[cfg(feature = "std")]
+ {
+ let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
+ let mut handles = vec![];
+ let thread_count = 20;
+ let workload = if cfg!(miri) { 100 } else { 1_000 };
+ for i in 0..thread_count {
+ let mtx = mtx.clone();
+ handles.push(
+ Builder::new()
+ .name(format!("worker #{i}"))
+ .spawn(move || {
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} halfway");
+ sleep(Duration::from_millis((i as u64) * 10));
+ for _ in 0..workload {
+ *mtx.lock() += 1;
+ }
+ println!("{i} finished");
+ })
+ .expect("should not fail"),
+ );
+ }
+ for h in handles {
+ h.join().expect("thread panicked");
+ }
+ println!("{:?}", &*mtx.lock());
+ assert_eq!(*mtx.lock(), workload * thread_count * 2);
}
- println!("{:?}", &*mtx.lock());
- assert_eq!(*mtx.lock(), workload * thread_count * 2);
}
diff --git a/rust/pin-init/examples/pthread_mutex.rs b/rust/pin-init/examples/pthread_mutex.rs
index 5acc5108b954..49b004c8c137 100644
--- a/rust/pin-init/examples/pthread_mutex.rs
+++ b/rust/pin-init/examples/pthread_mutex.rs
@@ -44,6 +44,7 @@ mod pthread_mtx {
pub enum Error {
#[allow(dead_code)]
IO(std::io::Error),
+ #[allow(dead_code)]
Alloc,
}
@@ -61,6 +62,7 @@ mod pthread_mtx {
}
impl<T> PThreadMutex<T> {
+ #[allow(dead_code)]
pub fn new(data: T) -> impl PinInit<Self, Error> {
fn init_raw() -> impl PinInit<UnsafeCell<libc::pthread_mutex_t>, Error> {
let init = |slot: *mut UnsafeCell<libc::pthread_mutex_t>| {
@@ -103,6 +105,7 @@ mod pthread_mtx {
}? Error)
}
+ #[allow(dead_code)]
pub fn lock(&self) -> PThreadMutexGuard<'_, T> {
// SAFETY: raw is always initialized
unsafe { libc::pthread_mutex_lock(self.raw.get()) };
@@ -137,6 +140,7 @@ mod pthread_mtx {
}
#[cfg_attr(test, test)]
+#[cfg_attr(all(test, miri), ignore)]
fn main() {
#[cfg(all(any(feature = "std", feature = "alloc"), not(windows)))]
{
diff --git a/rust/pin-init/examples/static_init.rs b/rust/pin-init/examples/static_init.rs
index 48531413ab94..0e165daa9798 100644
--- a/rust/pin-init/examples/static_init.rs
+++ b/rust/pin-init/examples/static_init.rs
@@ -3,6 +3,7 @@
#![allow(clippy::undocumented_unsafe_blocks)]
#![cfg_attr(feature = "alloc", feature(allocator_api))]
#![cfg_attr(not(RUSTC_LINT_REASONS_IS_STABLE), feature(lint_reasons))]
+#![allow(unused_imports)]
use core::{
cell::{Cell, UnsafeCell},
@@ -12,12 +13,13 @@ use core::{
time::Duration,
};
use pin_init::*;
+#[cfg(feature = "std")]
use std::{
sync::Arc,
thread::{sleep, Builder},
};
-#[expect(unused_attributes)]
+#[allow(unused_attributes)]
mod mutex;
use mutex::*;
@@ -82,42 +84,41 @@ unsafe impl PinInit<CMutex<usize>> for CountInit {
pub static COUNT: StaticInit<CMutex<usize>, CountInit> = StaticInit::new(CountInit);
-#[cfg(not(any(feature = "std", feature = "alloc")))]
-fn main() {}
-
-#[cfg(any(feature = "std", feature = "alloc"))]
fn main() {
- let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
- let mut handles = vec![];
- let thread_count = 20;
- let workload = 1_000;
- for i in 0..thread_count {
- let mtx = mtx.clone();
- handles.push(
- Builder::new()
- .name(format!("worker #{i}"))
- .spawn(move || {
- for _ in 0..workload {
- *COUNT.lock() += 1;
- std::thread::sleep(std::time::Duration::from_millis(10));
- *mtx.lock() += 1;
- std::thread::sleep(std::time::Duration::from_millis(10));
- *COUNT.lock() += 1;
- }
- println!("{i} halfway");
- sleep(Duration::from_millis((i as u64) * 10));
- for _ in 0..workload {
- std::thread::sleep(std::time::Duration::from_millis(10));
- *mtx.lock() += 1;
- }
- println!("{i} finished");
- })
- .expect("should not fail"),
- );
- }
- for h in handles {
- h.join().expect("thread panicked");
+ #[cfg(feature = "std")]
+ {
+ let mtx: Pin<Arc<CMutex<usize>>> = Arc::pin_init(CMutex::new(0)).unwrap();
+ let mut handles = vec![];
+ let thread_count = 20;
+ let workload = 1_000;
+ for i in 0..thread_count {
+ let mtx = mtx.clone();
+ handles.push(
+ Builder::new()
+ .name(format!("worker #{i}"))
+ .spawn(move || {
+ for _ in 0..workload {
+ *COUNT.lock() += 1;
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *mtx.lock() += 1;
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *COUNT.lock() += 1;
+ }
+ println!("{i} halfway");
+ sleep(Duration::from_millis((i as u64) * 10));
+ for _ in 0..workload {
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ *mtx.lock() += 1;
+ }
+ println!("{i} finished");
+ })
+ .expect("should not fail"),
+ );
+ }
+ for h in handles {
+ h.join().expect("thread panicked");
+ }
+ println!("{:?}, {:?}", &*mtx.lock(), &*COUNT.lock());
+ assert_eq!(*mtx.lock(), workload * thread_count * 2);
}
- println!("{:?}, {:?}", &*mtx.lock(), &*COUNT.lock());
- assert_eq!(*mtx.lock(), workload * thread_count * 2);
}
diff --git a/rust/pin-init/src/__internal.rs b/rust/pin-init/src/__internal.rs
index 557b5948cddc..90f18e9a2912 100644
--- a/rust/pin-init/src/__internal.rs
+++ b/rust/pin-init/src/__internal.rs
@@ -188,6 +188,7 @@ impl<T> StackInit<T> {
}
#[test]
+#[cfg(feature = "std")]
fn stack_init_reuse() {
use ::std::{borrow::ToOwned, println, string::String};
use core::pin::pin;
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
index f4e034497cdd..62e013a5cc20 100644
--- a/rust/pin-init/src/lib.rs
+++ b/rust/pin-init/src/lib.rs
@@ -148,7 +148,7 @@
//! fn new() -> impl PinInit<Self, Error> {
//! try_pin_init!(Self {
//! status <- CMutex::new(0),
-//! buffer: Box::init(pin_init::zeroed())?,
+//! buffer: Box::init(pin_init::init_zeroed())?,
//! }? Error)
//! }
//! }
@@ -742,7 +742,7 @@ macro_rules! stack_try_pin_init {
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
/// pointer named `this` inside of the initializer.
-/// - Using struct update syntax one can place `..Zeroable::zeroed()` at the very end of the
+/// - Using struct update syntax one can place `..Zeroable::init_zeroed()` at the very end of the
/// struct, this initializes every field with 0 and then runs all initializers specified in the
/// body. This can only be done if [`Zeroable`] is implemented for the struct.
///
@@ -769,7 +769,7 @@ macro_rules! stack_try_pin_init {
/// });
/// let init = pin_init!(Buf {
/// buf: [1; 64],
-/// ..Zeroable::zeroed()
+/// ..Zeroable::init_zeroed()
/// });
/// ```
///
@@ -805,7 +805,7 @@ macro_rules! pin_init {
/// ```rust
/// # #![feature(allocator_api)]
/// # #[path = "../examples/error.rs"] mod error; use error::Error;
-/// use pin_init::{pin_data, try_pin_init, PinInit, InPlaceInit, zeroed};
+/// use pin_init::{pin_data, try_pin_init, PinInit, InPlaceInit, init_zeroed};
///
/// #[pin_data]
/// struct BigBuf {
@@ -817,7 +817,7 @@ macro_rules! pin_init {
/// impl BigBuf {
/// fn new() -> impl PinInit<Self, Error> {
/// try_pin_init!(Self {
-/// big: Box::init(zeroed())?,
+/// big: Box::init(init_zeroed())?,
/// small: [0; 1024 * 1024],
/// ptr: core::ptr::null_mut(),
/// }? Error)
@@ -866,7 +866,7 @@ macro_rules! try_pin_init {
/// # #[path = "../examples/error.rs"] mod error; use error::Error;
/// # #[path = "../examples/mutex.rs"] mod mutex; use mutex::*;
/// # use pin_init::InPlaceInit;
-/// use pin_init::{init, Init, zeroed};
+/// use pin_init::{init, Init, init_zeroed};
///
/// struct BigBuf {
/// small: [u8; 1024 * 1024],
@@ -875,7 +875,7 @@ macro_rules! try_pin_init {
/// impl BigBuf {
/// fn new() -> impl Init<Self> {
/// init!(Self {
-/// small <- zeroed(),
+/// small <- init_zeroed(),
/// })
/// }
/// }
@@ -913,7 +913,7 @@ macro_rules! init {
/// # #![feature(allocator_api)]
/// # use core::alloc::AllocError;
/// # use pin_init::InPlaceInit;
-/// use pin_init::{try_init, Init, zeroed};
+/// use pin_init::{try_init, Init, init_zeroed};
///
/// struct BigBuf {
/// big: Box<[u8; 1024 * 1024 * 1024]>,
@@ -923,7 +923,7 @@ macro_rules! init {
/// impl BigBuf {
/// fn new() -> impl Init<Self, AllocError> {
/// try_init!(Self {
-/// big: Box::init(zeroed())?,
+/// big: Box::init(init_zeroed())?,
/// small: [0; 1024 * 1024],
/// }? AllocError)
/// }
@@ -953,7 +953,7 @@ macro_rules! try_init {
/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]`, i.e. that it is
/// structurally pinned.
///
-/// # Example
+/// # Examples
///
/// This will succeed:
/// ```
@@ -1170,7 +1170,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
///
/// ```rust
/// # #![expect(clippy::disallowed_names)]
- /// use pin_init::{init, zeroed, Init};
+ /// use pin_init::{init, init_zeroed, Init};
///
/// struct Foo {
/// buf: [u8; 1_000_000],
@@ -1183,7 +1183,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
/// }
///
/// let foo = init!(Foo {
- /// buf <- zeroed()
+ /// buf <- init_zeroed()
/// }).chain(|foo| {
/// foo.setup();
/// Ok(())
@@ -1495,7 +1495,45 @@ pub unsafe trait PinnedDrop: __internal::HasPinData {
/// ```rust,ignore
/// let val: Self = unsafe { core::mem::zeroed() };
/// ```
-pub unsafe trait Zeroable {}
+pub unsafe trait Zeroable {
+ /// Create a new zeroed `Self`.
+ ///
+ /// The returned initializer will write `0x00` to every byte of the given `slot`.
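+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, using the derive macro as in the [`Zeroable::zeroed`] example:
+ ///
+ /// ```
+ /// use pin_init::Zeroable;
+ ///
+ /// #[derive(Zeroable)]
+ /// struct Point {
+ ///     x: u32,
+ ///     y: u32,
+ /// }
+ ///
+ /// // An initializer that writes `0x00` to every byte of a `Point`.
+ /// let _init = Point::init_zeroed();
+ /// ```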
+ #[inline]
+ fn init_zeroed() -> impl Init<Self>
+ where
+ Self: Sized,
+ {
+ init_zeroed()
+ }
+
+ /// Create a `Self` consisting of all zeroes.
+ ///
+ /// Whenever a type implements [`Zeroable`], this function should be preferred over
+ /// [`core::mem::zeroed()`] or using `MaybeUninit<T>::zeroed().assume_init()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use pin_init::{Zeroable, zeroed};
+ ///
+ /// #[derive(Zeroable)]
+ /// struct Point {
+ /// x: u32,
+ /// y: u32,
+ /// }
+ ///
+ /// let point: Point = zeroed();
+ /// assert_eq!(point.x, 0);
+ /// assert_eq!(point.y, 0);
+ /// ```
+ fn zeroed() -> Self
+ where
+ Self: Sized,
+ {
+ zeroed()
+ }
+}
/// Marker trait for types that allow `Option<Self>` to be set to all zeroes in order to write
/// `None` to that location.
@@ -1508,11 +1546,21 @@ pub unsafe trait ZeroableOption {}
// SAFETY: by the safety requirement of `ZeroableOption`, this is valid.
unsafe impl<T: ZeroableOption> Zeroable for Option<T> {}
-/// Create a new zeroed T.
+// SAFETY: `Option<&T>` is part of the option layout optimization guarantee:
+// <https://doc.rust-lang.org/stable/std/option/index.html#representation>.
+unsafe impl<T> ZeroableOption for &T {}
+// SAFETY: `Option<&mut T>` is part of the option layout optimization guarantee:
+// <https://doc.rust-lang.org/stable/std/option/index.html#representation>.
+unsafe impl<T> ZeroableOption for &mut T {}
+// SAFETY: `Option<NonNull<T>>` is part of the option layout optimization guarantee:
+// <https://doc.rust-lang.org/stable/std/option/index.html#representation>.
+unsafe impl<T> ZeroableOption for NonNull<T> {}
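+
+// Together with the blanket `Zeroable for Option<T: ZeroableOption>` impl above, these make
+// expressions like the following valid (illustrative):
+//
+//     let none: Option<&u32> = zeroed(); // an all-zero bit pattern encodes `None`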
+
+/// Create an initializer for a zeroed `T`.
///
/// The returned initializer will write `0x00` to every byte of the given `slot`.
#[inline]
-pub fn zeroed<T: Zeroable>() -> impl Init<T> {
+pub fn init_zeroed<T: Zeroable>() -> impl Init<T> {
// SAFETY: Because `T: Zeroable`, all bytes zero is a valid bit pattern for `T`
// and because we write all zeroes, the memory is initialized.
unsafe {
@@ -1523,6 +1571,31 @@ pub fn zeroed<T: Zeroable>() -> impl Init<T> {
}
}
+/// Create a `T` consisting of all zeroes.
+///
+/// Whenever a type implements [`Zeroable`], this function should be preferred over
+/// [`core::mem::zeroed()`] or using `MaybeUninit<T>::zeroed().assume_init()`.
+///
+/// # Examples
+///
+/// ```
+/// use pin_init::{Zeroable, zeroed};
+///
+/// #[derive(Zeroable)]
+/// struct Point {
+/// x: u32,
+/// y: u32,
+/// }
+///
+/// let point: Point = zeroed();
+/// assert_eq!(point.x, 0);
+/// assert_eq!(point.y, 0);
+/// ```
+pub const fn zeroed<T: Zeroable>() -> T {
+ // SAFETY: By the safety requirements of `Zeroable`, all zeroes is a valid bit pattern for `T`.
+ unsafe { core::mem::zeroed() }
+}
+
macro_rules! impl_zeroable {
($($({$($generics:tt)*})? $t:ty, )*) => {
// SAFETY: Safety comments written in the macro invocation.
@@ -1560,7 +1633,6 @@ impl_zeroable! {
Option<NonZeroU128>, Option<NonZeroUsize>,
Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
Option<NonZeroI128>, Option<NonZeroIsize>,
- {<T>} Option<NonNull<T>>,
// SAFETY: `null` pointer is valid.
//
@@ -1590,6 +1662,22 @@ macro_rules! impl_tuple_zeroable {
impl_tuple_zeroable!(A, B, C, D, E, F, G, H, I, J);
+macro_rules! impl_fn_zeroable_option {
+ ([$($abi:literal),* $(,)?] $args:tt) => {
+ $(impl_fn_zeroable_option!({extern $abi} $args);)*
+ $(impl_fn_zeroable_option!({unsafe extern $abi} $args);)*
+ };
+ ({$($prefix:tt)*} {$(,)?}) => {};
+ ({$($prefix:tt)*} {$ret:ident, $($rest:ident),* $(,)?}) => {
+ // SAFETY: function pointers are part of the option layout optimization:
+ // <https://doc.rust-lang.org/stable/std/option/index.html#representation>.
+ unsafe impl<$ret, $($rest),*> ZeroableOption for $($prefix)* fn($($rest),*) -> $ret {}
+ impl_fn_zeroable_option!({$($prefix)*} {$($rest),*,});
+ };
+}
+
+impl_fn_zeroable_option!(["Rust", "C"] { A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U });
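+
+// Illustrative note: each recursion step peels off one type parameter to use as the return
+// type, so the invocation above emits impls equivalent to
+//
+//     unsafe impl<R> ZeroableOption for extern "C" fn() -> R {}
+//     unsafe impl<R, A1> ZeroableOption for unsafe extern "Rust" fn(A1) -> R {}
+//
+// and so on, for both ABIs, with and without `unsafe`, up to 20 argument types.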
+
/// This trait allows creating an instance of `Self` which contains exactly one
/// [structurally pinned value](https://doc.rust-lang.org/std/pin/index.html#projections-and-structural-pinning).
///
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 935d77745d1d..9ced630737b8 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -1030,7 +1030,7 @@ macro_rules! __pin_data {
///
/// This macro has multiple internal call configurations, these are always the very first ident:
/// - nothing: this is the base case and called by the `{try_}{pin_}init!` macros.
-/// - `with_update_parsed`: when the `..Zeroable::zeroed()` syntax has been handled.
+/// - `with_update_parsed`: when the `..Zeroable::init_zeroed()` syntax has been handled.
/// - `init_slot`: recursively creates the code that initializes all fields in `slot`.
/// - `make_initializer`: recursively create the struct initializer that guarantees that every
/// field has been initialized exactly once.
@@ -1059,7 +1059,7 @@ macro_rules! __init_internal {
@data($data, $($use_data)?),
@has_data($has_data, $get_data),
@construct_closure($construct_closure),
- @zeroed(), // Nothing means default behavior.
+ @init_zeroed(), // Nothing means default behavior.
)
};
(
@@ -1074,7 +1074,7 @@ macro_rules! __init_internal {
@has_data($has_data:ident, $get_data:ident),
// `pin_init_from_closure` or `init_from_closure`.
@construct_closure($construct_closure:ident),
- @munch_fields(..Zeroable::zeroed()),
+ @munch_fields(..Zeroable::init_zeroed()),
) => {
$crate::__init_internal!(with_update_parsed:
@this($($this)?),
@@ -1084,7 +1084,7 @@ macro_rules! __init_internal {
@data($data, $($use_data)?),
@has_data($has_data, $get_data),
@construct_closure($construct_closure),
- @zeroed(()), // `()` means zero all fields not mentioned.
+ @init_zeroed(()), // `()` means zero all fields not mentioned.
)
};
(
@@ -1124,7 +1124,7 @@ macro_rules! __init_internal {
@has_data($has_data:ident, $get_data:ident),
// `pin_init_from_closure` or `init_from_closure`.
@construct_closure($construct_closure:ident),
- @zeroed($($init_zeroed:expr)?),
+ @init_zeroed($($init_zeroed:expr)?),
) => {{
// We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
// type and shadow it later when we insert the arbitrary user code. That way there will be
@@ -1196,7 +1196,7 @@ macro_rules! __init_internal {
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
- @munch_fields($(..Zeroable::zeroed())? $(,)?),
+ @munch_fields($(..Zeroable::init_zeroed())? $(,)?),
) => {
// Endpoint of munching, no fields are left. If execution reaches this point, all fields
// have been initialized. Therefore we can now dismiss the guards by forgetting them.
@@ -1300,11 +1300,11 @@ macro_rules! __init_internal {
(make_initializer:
@slot($slot:ident),
@type_name($t:path),
- @munch_fields(..Zeroable::zeroed() $(,)?),
+ @munch_fields(..Zeroable::init_zeroed() $(,)?),
@acc($($acc:tt)*),
) => {
// Endpoint, nothing more to munch, create the initializer. Since the users specified
- // `..Zeroable::zeroed()`, the slot will already have been zeroed and all field that have
+ // `..Zeroable::init_zeroed()`, the slot will already have been zeroed and all fields that have
// not been overwritten are thus zero and initialized. We still check that all fields are
// actually accessible by using the struct update syntax ourselves.
// We are inside of a closure that is never executed and thus we can abuse `slot` to
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index c98d7a8cde77..31c2f713313f 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -14,6 +14,9 @@
#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
#![allow(
clippy::all,
+ clippy::cast_lossless,
+ clippy::ptr_as_ptr,
+ clippy::ref_as_ptr,
clippy::undocumented_unsafe_blocks,
dead_code,
missing_docs,