Diffstat (limited to 'rust')
-rw-r--r--  rust/bindings/bindings_helper.h      |   2
-rw-r--r--  rust/helpers/helpers.c               |   1
-rw-r--r--  rust/helpers/scatterlist.c           |  24
-rw-r--r--  rust/kernel/alloc/allocator.rs       |  52
-rw-r--r--  rust/kernel/alloc/allocator/iter.rs  | 102
-rw-r--r--  rust/kernel/alloc/allocator_test.rs  |  29
-rw-r--r--  rust/kernel/alloc/kbox.rs            |  40
-rw-r--r--  rust/kernel/alloc/kvec.rs            |  40
-rw-r--r--  rust/kernel/alloc/layout.rs          |   5
-rw-r--r--  rust/kernel/devres.rs                |   6
-rw-r--r--  rust/kernel/dma.rs                   |  86
-rw-r--r--  rust/kernel/drm/driver.rs            |   3
-rw-r--r--  rust/kernel/drm/gem/mod.rs           |  93
-rw-r--r--  rust/kernel/lib.rs                   |   2
-rw-r--r--  rust/kernel/page.rs                  |  87
-rw-r--r--  rust/kernel/scatterlist.rs           | 491
-rw-r--r--  rust/kernel/transmute.rs             | 114
-rw-r--r--  rust/kernel/workqueue.rs             |   9
-rw-r--r--  rust/pin-init/README.md              |  12
-rw-r--r--  rust/pin-init/examples/error.rs      |   4
-rw-r--r--  rust/pin-init/src/lib.rs             |   4
-rw-r--r--  rust/pin-init/src/macros.rs          | 239
-rw-r--r--  rust/uapi/uapi_helper.h              |   1
23 files changed, 1337 insertions(+), 109 deletions(-)
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 84d60635e8a9..c2cc52ee9945 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -47,6 +47,7 @@
#include <linux/cpumask.h>
#include <linux/cred.h>
#include <linux/device/faux.h>
+#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
@@ -57,6 +58,7 @@
#include <linux/jiffies.h>
#include <linux/jump_label.h>
#include <linux/mdio.h>
+#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/of_device.h>
#include <linux/pci.h>
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 7cf7fe95e41d..e94542bf6ea7 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -39,6 +39,7 @@
#include "rcu.c"
#include "refcount.c"
#include "regulator.c"
+#include "scatterlist.c"
#include "security.c"
#include "signal.c"
#include "slab.c"
diff --git a/rust/helpers/scatterlist.c b/rust/helpers/scatterlist.c
new file mode 100644
index 000000000000..80c956ee09ab
--- /dev/null
+++ b/rust/helpers/scatterlist.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-direction.h>
+
+dma_addr_t rust_helper_sg_dma_address(struct scatterlist *sg)
+{
+ return sg_dma_address(sg);
+}
+
+unsigned int rust_helper_sg_dma_len(struct scatterlist *sg)
+{
+ return sg_dma_len(sg);
+}
+
+struct scatterlist *rust_helper_sg_next(struct scatterlist *sg)
+{
+ return sg_next(sg);
+}
+
+void rust_helper_dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return dma_unmap_sgtable(dev, sgt, dir, attrs);
+}
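These helpers are needed because `sg_dma_address()`, `sg_dma_len()`, `sg_next()` and `dma_unmap_sgtable()` are macros or inline helpers on the C side, which bindgen cannot bind directly; wrapping them as real symbols makes them callable from Rust. As a hedged sketch (the `sg` pointer and its DMA mapping are assumed valid), the Rust side reaches them through the generated `bindings` crate, with the `rust_helper_` prefix stripped by the build:

```
use kernel::{bindings, prelude::*};

// Sketch: call the helpers above from Rust under their original C names.
fn log_entry(sg: *mut bindings::scatterlist) {
    // SAFETY: `sg` is assumed to be a valid, DMA mapped `struct scatterlist`.
    let addr = unsafe { bindings::sg_dma_address(sg) };
    // SAFETY: As above.
    let len = unsafe { bindings::sg_dma_len(sg) };
    pr_info!("sg entry: addr={:#x} len={}\n", addr, len);
}
```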
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 2692cf90c948..84ee7e9d7b0e 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -15,8 +15,12 @@ use core::ptr::NonNull;
use crate::alloc::{AllocError, Allocator};
use crate::bindings;
+use crate::page;
use crate::pr_warn;
+mod iter;
+pub use self::iter::VmallocPageIter;
+
/// The contiguous kernel allocator.
///
/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also
@@ -142,6 +146,54 @@ unsafe impl Allocator for Kmalloc {
}
}
+impl Vmalloc {
+ /// Convert a pointer to a [`Vmalloc`] allocation to a [`page::BorrowedPage`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use core::ptr::{NonNull, from_mut};
+ /// # use kernel::{page, prelude::*};
+ /// use kernel::alloc::allocator::Vmalloc;
+ ///
+ /// let mut vbox = VBox::<[u8; page::PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+ ///
+ /// {
+ /// // SAFETY: By the type invariant of `Box` the inner pointer of `vbox` is non-null.
+ /// let ptr = unsafe { NonNull::new_unchecked(from_mut(&mut *vbox)) };
+ ///
+ /// // SAFETY:
+ /// // `ptr` is a valid pointer to a `Vmalloc` allocation.
+ /// // `ptr` is valid for the entire lifetime of `page`.
+ /// let page = unsafe { Vmalloc::to_page(ptr.cast()) };
+ ///
+ /// // SAFETY: There is no concurrent read or write to the same page.
+ /// unsafe { page.fill_zero_raw(0, page::PAGE_SIZE)? };
+ /// }
+ /// # Ok::<(), Error>(())
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must be a valid pointer to a [`Vmalloc`] allocation.
+ /// - `ptr` must remain valid for the entire duration of `'a`.
+ pub unsafe fn to_page<'a>(ptr: NonNull<u8>) -> page::BorrowedPage<'a> {
+ // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+ let page = unsafe { bindings::vmalloc_to_page(ptr.as_ptr().cast()) };
+
+ // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid pointer
+ // to `Vmalloc` memory.
+ let page = unsafe { NonNull::new_unchecked(page) };
+
+ // SAFETY:
+ // - `page` is a valid pointer to a `struct page`, given that by the safety requirements of
+ // this function `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - By the safety requirements of this function `ptr` is valid for the entire lifetime of
+ // `'a`.
+ unsafe { page::BorrowedPage::from_raw(page) }
+ }
+}
+
// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
// - memory remains valid until it is explicitly freed,
// - passing a pointer to a valid memory allocation is OK,
diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs
new file mode 100644
index 000000000000..5759f86029b7
--- /dev/null
+++ b/rust/kernel/alloc/allocator/iter.rs
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::Vmalloc;
+use crate::page;
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// An [`Iterator`] of [`page::BorrowedPage`] items owned by a [`Vmalloc`] allocation.
+///
+/// # Guarantees
+///
+/// The pages yielded by the [`Iterator`] appear in ascending order of their mapping in the
+/// CPU's virtual address space.
+///
+/// # Invariants
+///
+/// - `buf` is a valid and [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+/// - `size` is the number of bytes from `buf` until the end of the [`Vmalloc`] allocation `buf`
+/// points to.
+pub struct VmallocPageIter<'a> {
+ /// The base address of the [`Vmalloc`] buffer.
+ buf: NonNull<u8>,
+ /// The size of the buffer pointed to by `buf` in bytes.
+ size: usize,
+ /// The current page index of the [`Iterator`].
+ index: usize,
+ _p: PhantomData<page::BorrowedPage<'a>>,
+}
+
+impl<'a> Iterator for VmallocPageIter<'a> {
+ type Item = page::BorrowedPage<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let offset = self.index.checked_mul(page::PAGE_SIZE)?;
+
+ // Even though `self.size()` may be smaller than `Self::page_count() * page::PAGE_SIZE`, it
+ // is always a number between `(Self::page_count() - 1) * page::PAGE_SIZE` and
+ // `Self::page_count() * page::PAGE_SIZE`, hence the check below is sufficient.
+ if offset < self.size() {
+ self.index += 1;
+ } else {
+ return None;
+ }
+
+ // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is
+ // bumped to 1.80 or later.
+ //
+ // SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`,
+ // hence the resulting pointer is guaranteed to be within the same allocation.
+ let ptr = unsafe { self.buf.as_ptr().add(offset) };
+
+ // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`.
+ let ptr = unsafe { NonNull::new_unchecked(ptr) };
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to a `Vmalloc` allocation.
+ // - `ptr` is valid for the duration of `'a`.
+ Some(unsafe { Vmalloc::to_page(ptr) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let remaining = self.page_count().saturating_sub(self.index);
+
+ (remaining, Some(remaining))
+ }
+}
+
+impl<'a> VmallocPageIter<'a> {
+ /// Creates a new [`VmallocPageIter`] instance.
+ ///
+ /// # Safety
+ ///
+ /// - `buf` must be a [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+ /// - `buf` must be valid for at least the lifetime of `'a`.
+ /// - `size` must be the number of bytes from `buf` until the end of the [`Vmalloc`] allocation
+ /// `buf` points to.
+ pub unsafe fn new(buf: NonNull<u8>, size: usize) -> Self {
+ // INVARIANT: By the safety requirements, `buf` is a valid and `page::PAGE_SIZE` aligned
+ // pointer into a [`Vmalloc`] allocation.
+ Self {
+ buf,
+ size,
+ index: 0,
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns the size of the backing [`Vmalloc`] allocation in bytes.
+ ///
+ /// Note that this is the size the [`Vmalloc`] allocation was created with. Hence, this
+ /// number may be smaller than `[`Self::page_count`] * [`page::PAGE_SIZE`]`.
+ #[inline]
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Returns the number of pages owned by the backing [`Vmalloc`] allocation.
+ #[inline]
+ pub fn page_count(&self) -> usize {
+ self.size().div_ceil(page::PAGE_SIZE)
+ }
+}
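As a usage sketch, assuming the `AsPageIter` implementations added for `VBox` and `VVec` further down in this series, iterating the pages backing a `Vmalloc` allocation looks like this; it mirrors the doc-tests in kvec.rs:

```
use kernel::page::{AsPageIter, PAGE_SIZE};
use kernel::prelude::*;

// Sketch: zero every page backing a `VVec` via `VmallocPageIter`.
fn zero_backing_pages(vec: &mut VVec<u8>) -> Result {
    for page in vec.page_iter() {
        // SAFETY: Assumes no concurrent read or write to these pages.
        unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
    }
    Ok(())
}
```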
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
index 90dd987d40e4..7b10e276f621 100644
--- a/rust/kernel/alloc/allocator_test.rs
+++ b/rust/kernel/alloc/allocator_test.rs
@@ -12,8 +12,10 @@
use super::{flags::*, AllocError, Allocator, Flags};
use core::alloc::Layout;
use core::cmp;
+use core::marker::PhantomData;
use core::ptr;
use core::ptr::NonNull;
+use kernel::page;
/// The userspace allocator based on libc.
pub struct Cmalloc;
@@ -33,6 +35,33 @@ impl Cmalloc {
}
}
+pub struct VmallocPageIter<'a> {
+ _p: PhantomData<page::BorrowedPage<'a>>,
+}
+
+impl<'a> Iterator for VmallocPageIter<'a> {
+ type Item = page::BorrowedPage<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ None
+ }
+}
+
+impl<'a> VmallocPageIter<'a> {
+ #[allow(clippy::missing_safety_doc)]
+ pub unsafe fn new(_buf: NonNull<u8>, _size: usize) -> Self {
+ Self { _p: PhantomData }
+ }
+
+ pub fn size(&self) -> usize {
+ 0
+ }
+
+ pub fn page_count(&self) -> usize {
+ 0
+ }
+}
+
extern "C" {
#[link_name = "aligned_alloc"]
fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index 856d05aa60f1..b69ff4a1d748 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -3,7 +3,7 @@
//! Implementation of [`Box`].
#[allow(unused_imports)] // Used in doc comments.
-use super::allocator::{KVmalloc, Kmalloc, Vmalloc};
+use super::allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter};
use super::{AllocError, Allocator, Flags};
use core::alloc::Layout;
use core::borrow::{Borrow, BorrowMut};
@@ -18,6 +18,7 @@ use core::result::Result;
use crate::ffi::c_void;
use crate::init::InPlaceInit;
+use crate::page::AsPageIter;
use crate::types::ForeignOwnable;
use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
@@ -598,3 +599,40 @@ where
unsafe { A::free(self.0.cast(), layout) };
}
}
+
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vbox = VBox::new((), GFP_KERNEL)?;
+///
+/// assert!(vbox.page_iter().next().is_none());
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+///
+/// let page = vbox.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VBox<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.0.cast();
+ let size = core::mem::size_of::<T>();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 3c72e0bdddb8..ac438e70a1ed 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -3,10 +3,11 @@
//! Implementation of [`Vec`].
use super::{
- allocator::{KVmalloc, Kmalloc, Vmalloc},
+ allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter},
layout::ArrayLayout,
AllocError, Allocator, Box, Flags,
};
+use crate::page::AsPageIter;
use core::{
borrow::{Borrow, BorrowMut},
fmt,
@@ -1017,6 +1018,43 @@ where
}
}
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::alloc::allocator::VmallocPageIter;
+/// use kernel::page::{AsPageIter, PAGE_SIZE};
+///
+/// let mut vec = VVec::<u8>::new();
+///
+/// assert!(vec.page_iter().next().is_none());
+///
+/// vec.reserve(PAGE_SIZE, GFP_KERNEL)?;
+///
+/// let page = vec.page_iter().next().expect("At least one page should be available.\n");
+///
+/// // SAFETY: There is no concurrent read or write to the same page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+impl<T> AsPageIter for VVec<T> {
+ type Iter<'a>
+ = VmallocPageIter<'a>
+ where
+ T: 'a;
+
+ fn page_iter(&mut self) -> Self::Iter<'_> {
+ let ptr = self.ptr.cast();
+ let size = self.layout.size();
+
+ // SAFETY:
+ // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation.
+ // - `ptr` is guaranteed to be valid for the lifetime of `'a`.
+ // - `size` is the size of the `Vmalloc` allocation `ptr` points to.
+ unsafe { VmallocPageIter::new(ptr, size) }
+ }
+}
+
/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector.
///
/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the
diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs
index 93ed514f7cc7..666accb7859c 100644
--- a/rust/kernel/alloc/layout.rs
+++ b/rust/kernel/alloc/layout.rs
@@ -98,6 +98,11 @@ impl<T> ArrayLayout<T> {
pub const fn is_empty(&self) -> bool {
self.len == 0
}
+
+ /// Returns the size of the [`ArrayLayout`] in bytes.
+ pub const fn size(&self) -> usize {
+ self.len() * core::mem::size_of::<T>()
+ }
}
impl<T> From<ArrayLayout<T>> for Layout {
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index d04e3fcebafb..aea6b7e31170 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -135,11 +135,9 @@ impl<T: Send> Devres<T> {
T: 'a,
Error: From<E>,
{
- let callback = Self::devres_callback;
-
try_pin_init!(&this in Self {
dev: dev.into(),
- callback,
+ callback: Self::devres_callback,
// INVARIANT: `inner` is properly initialized.
inner <- Opaque::pin_init(try_pin_init!(Inner {
devm <- Completion::new(),
@@ -160,7 +158,7 @@ impl<T: Send> Devres<T> {
// properly initialized, because we require `dev` (i.e. the *bound* device) to
// live at least as long as the returned `impl PinInit<Self, Error>`.
to_result(unsafe {
- bindings::devm_add_action(dev.as_raw(), Some(callback), inner.cast())
+ bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast())
}).inspect_err(|_| {
let inner = Opaque::cast_into(inner);
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index 2bc8ab51ec28..b2a6282876da 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -13,6 +13,16 @@ use crate::{
types::ARef,
};
+/// DMA address type.
+///
+/// Represents a bus address used for Direct Memory Access (DMA) operations.
+///
+/// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on
+/// `CONFIG_ARCH_DMA_ADDR_T_64BIT`.
+///
+/// Note that this may be `u64` even on 32-bit architectures.
+pub type DmaAddress = bindings::dma_addr_t;
+
/// Trait to be implemented by DMA capable bus devices.
///
/// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
@@ -244,6 +254,74 @@ pub mod attrs {
pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
}
+/// DMA data direction.
+///
+/// Corresponds to the C [`enum dma_data_direction`].
+///
+/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[repr(u32)]
+pub enum DataDirection {
+ /// The DMA mapping is for bidirectional data transfer.
+ ///
+ /// This is used when the buffer can be both read from and written to by the device.
+ /// The cache for the corresponding memory region is both flushed and invalidated.
+ Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),
+
+ /// The DMA mapping is for data transfer from memory to the device (write).
+ ///
+ /// The CPU has prepared data in the buffer, and the device will read it.
+ /// The cache for the corresponding memory region is flushed before device access.
+ ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),
+
+ /// The DMA mapping is for data transfer from the device to memory (read).
+ ///
+ /// The device will write data into the buffer for the CPU to read.
+ /// The cache for the corresponding memory region is invalidated before CPU access.
+ FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),
+
+ /// The DMA mapping is not for data transfer.
+ ///
+ /// This is primarily for debugging purposes. With this direction, the DMA mapping API
+ /// will not perform any cache coherency operations.
+ None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
+}
+
+impl DataDirection {
+ /// Casts the bindgen-generated enum type to a `u32` at compile time.
+ ///
+ /// This function will cause a compile-time error if the underlying value of the
+ /// C enum is out of bounds for `u32`.
+ const fn const_cast(val: bindings::dma_data_direction) -> u32 {
+ // CAST: The C standard allows compilers to choose different integer types for enums.
+ // To safely check the value, we cast it to a wide signed integer type (`i128`)
+ // which can hold any standard C integer enum type without truncation.
+ let wide_val = val as i128;
+
+ // Check if the value is outside the valid range for the target type `u32`.
+ // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
+ if wide_val < 0 || wide_val > u32::MAX as i128 {
+ // Trigger a compile-time error in a const context.
+ build_error!("C enum value is out of bounds for the target type `u32`.");
+ }
+
+ // CAST: This cast is valid because the check above guarantees that `wide_val`
+ // is within the representable range of `u32`.
+ wide_val as u32
+ }
+}
+
+impl From<DataDirection> for bindings::dma_data_direction {
+ /// Returns the raw representation of [`enum dma_data_direction`].
+ fn from(direction: DataDirection) -> Self {
+ // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
+ // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
+ // with the enum variants of `DataDirection`, which is a valid assumption given our
+ // compile-time checks.
+ direction as u32 as Self
+ }
+}
+
/// An abstraction of the `dma_alloc_coherent` API.
///
/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
@@ -275,7 +353,7 @@ pub mod attrs {
// entire `CoherentAllocation` including the allocated memory itself.
pub struct CoherentAllocation<T: AsBytes + FromBytes> {
dev: ARef<device::Device>,
- dma_handle: bindings::dma_addr_t,
+ dma_handle: DmaAddress,
count: usize,
cpu_addr: *mut T,
dma_attrs: Attrs,
@@ -376,7 +454,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// Returns a DMA handle which may be given to the device as the DMA address base of
/// the region.
- pub fn dma_handle(&self) -> bindings::dma_addr_t {
+ pub fn dma_handle(&self) -> DmaAddress {
self.dma_handle
}
@@ -384,13 +462,13 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
/// device as the DMA address base of the region.
///
/// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
- pub fn dma_handle_with_offset(&self, offset: usize) -> Result<bindings::dma_addr_t> {
+ pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
if offset >= self.count {
Err(EINVAL)
} else {
// INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
// into a `usize`, and `offset` is inferior to `count`.
- Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as bindings::dma_addr_t)
+ Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
}
}
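For orientation, a small sketch of how the pieces added above compose: `DataDirection` converts into the raw C enum for FFI calls, and `DmaAddress` is what a device gets programmed with. The function name is illustrative:

```
use kernel::bindings;
use kernel::dma::{CoherentAllocation, DataDirection, DmaAddress};

// Sketch: fetch the bus address of a coherent buffer together with the raw
// direction value a C API expects.
fn describe(alloc: &CoherentAllocation<u8>) -> (DmaAddress, bindings::dma_data_direction) {
    (alloc.dma_handle(), DataDirection::ToDevice.into())
}
```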
diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs
index 8fefae41bcc6..91e13b6ca26a 100644
--- a/rust/kernel/drm/driver.rs
+++ b/rust/kernel/drm/driver.rs
@@ -86,6 +86,9 @@ pub struct AllocOps {
/// Trait for memory manager implementations. Implemented internally.
pub trait AllocImpl: super::private::Sealed + drm::gem::IntoGEMObject {
+ /// The [`Driver`] implementation for this [`AllocImpl`].
+ type Driver: drm::Driver;
+
/// The C callback operations for this memory manager.
const ALLOC_OPS: AllocOps;
}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index a822aedee949..6ccbb25628a1 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -13,34 +13,34 @@ use crate::{
sync::aref::{ARef, AlwaysRefCounted},
types::Opaque,
};
-use core::{mem, ops::Deref, ptr::NonNull};
+use core::{ops::Deref, ptr::NonNull};
+
+/// A type alias for retrieving a [`Driver`]'s [`DriverFile`] implementation from its
+/// [`DriverObject`] implementation.
+///
+/// [`Driver`]: drm::Driver
+/// [`DriverFile`]: drm::file::DriverFile
+pub type DriverFile<T> = drm::File<<<T as DriverObject>::Driver as drm::Driver>::File>;
/// GEM object functions, which must be implemented by drivers.
-pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+pub trait DriverObject: Sync + Send + Sized {
+ /// Parent `Driver` for this object.
+ type Driver: drm::Driver;
+
/// Create a new driver data object for a GEM object of a given size.
- fn new(dev: &drm::Device<T::Driver>, size: usize) -> impl PinInit<Self, Error>;
+ fn new(dev: &drm::Device<Self::Driver>, size: usize) -> impl PinInit<Self, Error>;
/// Open a new handle to an existing object, associated with a File.
- fn open(
- _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
- _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) -> Result {
+ fn open(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) -> Result {
Ok(())
}
/// Close a handle to an existing object, associated with a File.
- fn close(
- _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
- _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) {
- }
+ fn close(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) {}
}
/// Trait that represents a GEM object subtype
pub trait IntoGEMObject: Sized + super::private::Sealed + AlwaysRefCounted {
- /// Owning driver for this type
- type Driver: drm::Driver;
-
/// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
/// this owning object is valid.
fn as_raw(&self) -> *mut bindings::drm_gem_object;
@@ -75,25 +75,16 @@ unsafe impl<T: IntoGEMObject> AlwaysRefCounted for T {
}
}
-/// Trait which must be implemented by drivers using base GEM objects.
-pub trait DriverObject: BaseDriverObject<Object<Self>> {
- /// Parent `Driver` for this object.
- type Driver: drm::Driver;
-}
-
-extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn open_callback<T: DriverObject>(
raw_obj: *mut bindings::drm_gem_object,
raw_file: *mut bindings::drm_file,
) -> core::ffi::c_int {
// SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
- let file = unsafe {
- drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file)
- };
- // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring that
- // `raw_obj` is indeed contained within a `Object<T>`.
- let obj = unsafe {
- <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj)
- };
+ let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
+ // SAFETY: `open_callback` is specified in the `AllocOps` structure for `Object<T>`, ensuring
+ // that `raw_obj` is indeed contained within an `Object<T>`.
+ let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };
match T::open(obj, file) {
Err(e) => e.to_errno(),
@@ -101,26 +92,21 @@ extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
}
}
-extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn close_callback<T: DriverObject>(
raw_obj: *mut bindings::drm_gem_object,
raw_file: *mut bindings::drm_file,
) {
// SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
- let file = unsafe {
- drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file)
- };
+ let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
// SAFETY: `close_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
// that `raw_obj` is indeed contained within a `Object<T>`.
- let obj = unsafe {
- <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj)
- };
+ let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };
T::close(obj, file);
}
impl<T: DriverObject> IntoGEMObject for Object<T> {
- type Driver = T::Driver;
-
fn as_raw(&self) -> *mut bindings::drm_gem_object {
self.obj.get()
}
@@ -142,10 +128,12 @@ pub trait BaseObject: IntoGEMObject {
/// Creates a new handle for the object associated with a given `File`
/// (or returns an existing one).
- fn create_handle(
- &self,
- file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
- ) -> Result<u32> {
+ fn create_handle<D, F>(&self, file: &drm::File<F>) -> Result<u32>
+ where
+ Self: AllocImpl<Driver = D>,
+ D: drm::Driver<Object = Self, File = F>,
+ F: drm::file::DriverFile<Driver = D>,
+ {
let mut handle: u32 = 0;
// SAFETY: The arguments are all valid per the type invariants.
to_result(unsafe {
@@ -155,10 +143,12 @@ pub trait BaseObject: IntoGEMObject {
}
/// Looks up an object by its handle for a given `File`.
- fn lookup_handle(
- file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
- handle: u32,
- ) -> Result<ARef<Self>> {
+ fn lookup_handle<D, F>(file: &drm::File<F>, handle: u32) -> Result<ARef<Self>>
+ where
+ Self: AllocImpl<Driver = D>,
+ D: drm::Driver<Object = Self, File = F>,
+ F: drm::file::DriverFile<Driver = D>,
+ {
// SAFETY: The arguments are all valid per the type invariants.
let ptr = unsafe { bindings::drm_gem_object_lookup(file.as_raw().cast(), handle) };
if ptr.is_null() {
@@ -208,13 +198,10 @@ pub struct Object<T: DriverObject + Send + Sync> {
}
impl<T: DriverObject> Object<T> {
- /// The size of this object's structure.
- pub const SIZE: usize = mem::size_of::<Self>();
-
const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
free: Some(Self::free_callback),
- open: Some(open_callback::<T, Object<T>>),
- close: Some(close_callback::<T, Object<T>>),
+ open: Some(open_callback::<T>),
+ close: Some(close_callback::<T>),
print_info: None,
export: None,
pin: None,
@@ -297,6 +284,8 @@ impl<T: DriverObject> Deref for Object<T> {
}
impl<T: DriverObject> AllocImpl for Object<T> {
+ type Driver = T::Driver;
+
const ALLOC_OPS: AllocOps = AllocOps {
gem_create_object: None,
prime_handle_to_fd: None,
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index fef97f2a5098..44e4b8853ff3 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -18,6 +18,7 @@
//
// Stable since Rust 1.79.0.
#![feature(inline_const)]
+#![feature(pointer_is_aligned)]
//
// Stable since Rust 1.81.0.
#![feature(lint_reasons)]
@@ -113,6 +114,7 @@ pub mod print;
pub mod rbtree;
pub mod regulator;
pub mod revocable;
+pub mod scatterlist;
pub mod security;
pub mod seq_file;
pub mod sizes;
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 7c1b17246ed5..75ef096075cb 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -9,7 +9,12 @@ use crate::{
error::Result,
uaccess::UserSliceReader,
};
-use core::ptr::{self, NonNull};
+use core::{
+ marker::PhantomData,
+ mem::ManuallyDrop,
+ ops::Deref,
+ ptr::{self, NonNull},
+};
/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
@@ -30,6 +35,86 @@ pub const fn page_align(addr: usize) -> usize {
(addr + (PAGE_SIZE - 1)) & PAGE_MASK
}
+/// Representation of a non-owning reference to a [`Page`].
+///
+/// This type provides a borrowed version of a [`Page`] that is owned by some other entity, e.g. a
+/// [`Vmalloc`] allocation such as [`VBox`].
+///
+/// # Example
+///
+/// ```
+/// # use kernel::{bindings, prelude::*};
+/// use kernel::page::{BorrowedPage, Page, PAGE_SIZE};
+/// # use core::{mem::MaybeUninit, ptr, ptr::NonNull };
+///
+/// fn borrow_page<'a>(vbox: &'a mut VBox<MaybeUninit<[u8; PAGE_SIZE]>>) -> BorrowedPage<'a> {
+/// let ptr = ptr::from_ref(&**vbox);
+///
+/// // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+/// let page = unsafe { bindings::vmalloc_to_page(ptr.cast()) };
+///
+/// // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid
+/// // pointer to `Vmalloc` memory.
+/// let page = unsafe { NonNull::new_unchecked(page) };
+///
+/// // SAFETY:
+/// // - `self.0` is a valid pointer to a `struct page`.
+/// // - `self.0` is valid for the entire lifetime of `self`.
+/// unsafe { BorrowedPage::from_raw(page) }
+/// }
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+/// let page = borrow_page(&mut vbox);
+///
+/// // SAFETY: There is no concurrent read or write to this page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// The borrowed underlying pointer to a `struct page` is valid for the entire lifetime `'a`.
+///
+/// [`VBox`]: kernel::alloc::VBox
+/// [`Vmalloc`]: kernel::alloc::allocator::Vmalloc
+pub struct BorrowedPage<'a>(ManuallyDrop<Page>, PhantomData<&'a Page>);
+
+impl<'a> BorrowedPage<'a> {
+ /// Constructs a [`BorrowedPage`] from a raw pointer to a `struct page`.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must point to a valid `bindings::page`.
+ /// - `ptr` must remain valid for the entire lifetime `'a`.
+ pub unsafe fn from_raw(ptr: NonNull<bindings::page>) -> Self {
+ let page = Page { page: ptr };
+
+ // INVARIANT: The safety requirements guarantee that `ptr` is valid for the entire lifetime
+ // `'a`.
+ Self(ManuallyDrop::new(page), PhantomData)
+ }
+}
+
+impl<'a> Deref for BorrowedPage<'a> {
+ type Target = Page;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// Trait to be implemented by types which provide an [`Iterator`] implementation of
+/// [`BorrowedPage`] items, such as [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter).
+pub trait AsPageIter {
+ /// The [`Iterator`] type, e.g. [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter).
+ type Iter<'a>: Iterator<Item = BorrowedPage<'a>>
+ where
+ Self: 'a;
+
+ /// Returns an [`Iterator`] of [`BorrowedPage`] items over all pages owned by `self`.
+ fn page_iter(&mut self) -> Self::Iter<'_>;
+}
+
/// A pointer to a page that owns the page allocation.
///
/// # Invariants
diff --git a/rust/kernel/scatterlist.rs b/rust/kernel/scatterlist.rs
new file mode 100644
index 000000000000..9709dff60b5a
--- /dev/null
+++ b/rust/kernel/scatterlist.rs
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for scatter-gather lists.
+//!
+//! C header: [`include/linux/scatterlist.h`](srctree/include/linux/scatterlist.h)
+//!
+//! Scatter-gather (SG) I/O is a memory access technique that allows devices to perform DMA
+//! operations on data buffers that are not physically contiguous in memory. It works by creating a
+//! "scatter-gather list", an array where each entry specifies the address and length of a
+//! physically contiguous memory segment.
+//!
+//! The device's DMA controller can then read this list and process the segments sequentially as
+//! part of one logical I/O request. This avoids the need for a single, large, physically contiguous
+//! memory buffer, which can be difficult or impossible to allocate.
+//!
+//! This module provides safe Rust abstractions over the kernel's `struct scatterlist` and
+//! `struct sg_table` types.
+//!
+//! The main entry point is the [`SGTable`] type, which represents a complete scatter-gather table.
+//! It can be either:
+//!
+//! - An owned table ([`SGTable<Owned<P>>`]), created from a Rust memory buffer (e.g., [`VVec`]).
+//! This type manages the allocation of the `struct sg_table`, the DMA mapping of the buffer, and
+//! the automatic cleanup of all resources.
+//! - A borrowed reference (&[`SGTable`]), which provides safe, read-only access to a table that was
+//! allocated by other (e.g., C) code.
+//!
+//! Individual entries in the table are represented by [`SGEntry`], which can be accessed by
+//! iterating over an [`SGTable`].
+
+use crate::{
+ alloc,
+ alloc::allocator::VmallocPageIter,
+ bindings,
+ device::{Bound, Device},
+ devres::Devres,
+ dma, error,
+ io::resource::ResourceSize,
+ page,
+ prelude::*,
+ types::{ARef, Opaque},
+};
+use core::{ops::Deref, ptr::NonNull};
+
+/// A single entry in a scatter-gather list.
+///
+/// An `SGEntry` represents a single, physically contiguous segment of memory that has been mapped
+/// for DMA.
+///
+/// Instances of this struct are obtained by iterating over an [`SGTable`]. Drivers do not create
+/// or own [`SGEntry`] objects directly.
+#[repr(transparent)]
+pub struct SGEntry(Opaque<bindings::scatterlist>);
+
+// SAFETY: `SGEntry` can be sent to any task.
+unsafe impl Send for SGEntry {}
+
+// SAFETY: `SGEntry` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for SGEntry {}
+
+impl SGEntry {
+ /// Convert a raw `struct scatterlist *` to a `&'a SGEntry`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the `struct scatterlist` pointed to by `ptr` is valid for the
+ /// lifetime `'a`.
+ #[inline]
+ unsafe fn from_raw<'a>(ptr: *mut bindings::scatterlist) -> &'a Self {
+ // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
+ // to a `struct scatterlist` for the duration of `'a`.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Obtain the raw `struct scatterlist *`.
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::scatterlist {
+ self.0.get()
+ }
+
+ /// Returns the DMA address of this SG entry.
+ ///
+ /// This is the address that the device should use to access the memory segment.
+ #[inline]
+ pub fn dma_address(&self) -> dma::DmaAddress {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+ unsafe { bindings::sg_dma_address(self.as_raw()) }
+ }
+
+ /// Returns the length of this SG entry in bytes.
+ #[inline]
+ pub fn dma_len(&self) -> ResourceSize {
+ #[allow(clippy::useless_conversion)]
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+ unsafe { bindings::sg_dma_len(self.as_raw()) }.into()
+ }
+}
+
+/// The borrowed generic type of an [`SGTable`], representing a borrowed or externally managed
+/// table.
+#[repr(transparent)]
+pub struct Borrowed(Opaque<bindings::sg_table>);
+
+// SAFETY: `Borrowed` can be sent to any task.
+unsafe impl Send for Borrowed {}
+
+// SAFETY: `Borrowed` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for Borrowed {}
+
+/// A scatter-gather table.
+///
+/// This struct is a wrapper around the kernel's `struct sg_table`. It manages a list of DMA-mapped
+/// memory segments that can be passed to a device for I/O operations.
+///
+/// The generic parameter `T` distinguishes between owned and borrowed tables.
+///
+/// - [`SGTable<Owned>`]: An owned table created and managed entirely by Rust code. It handles
+/// allocation, DMA mapping, and cleanup of all associated resources. See [`SGTable::new`].
+/// - [`SGTable<Borrowed>`] (or simply [`SGTable`]): Represents a table whose lifetime is managed
+/// externally. It can be used safely via a borrowed reference `&'a SGTable`, where `'a` is the
+/// external lifetime.
+///
+/// Both [`SGTable`] variants can be iterated over, yielding the individual [`SGEntry`] items.
+#[repr(transparent)]
+#[pin_data]
+pub struct SGTable<T: private::Sealed = Borrowed> {
+ #[pin]
+ inner: T,
+}
+
+impl SGTable {
+ /// Creates a borrowed `&'a SGTable` from a raw `struct sg_table` pointer.
+ ///
+ /// This allows safe access to an `sg_table` that is managed elsewhere (for example, in C code).
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ ///
+ /// - the `struct sg_table` pointed to by `ptr` is valid for the entire lifetime of `'a`,
+ /// - the data behind `ptr` is not modified concurrently for the duration of `'a`.
+ #[inline]
+ pub unsafe fn from_raw<'a>(ptr: *mut bindings::sg_table) -> &'a Self {
+ // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
+ // to a `struct sg_table` for the duration of `'a`.
+ unsafe { &*ptr.cast() }
+ }
+
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::sg_table {
+ self.inner.0.get()
+ }
+
+ /// Returns an [`SGTableIter`] bound to the lifetime of `self`.
+ pub fn iter(&self) -> SGTableIter<'_> {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
+ let nents = unsafe { (*self.as_raw()).nents };
+
+ let pos = if nents > 0 {
+ // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`.
+ let ptr = unsafe { (*self.as_raw()).sgl };
+
+ // SAFETY: `ptr` is guaranteed to be a valid pointer to a `struct scatterlist`.
+ Some(unsafe { SGEntry::from_raw(ptr) })
+ } else {
+ None
+ };
+
+ SGTableIter { pos, nents }
+ }
+}
+
+/// Represents the DMA mapping state of a `struct sg_table`.
+///
+/// This is used as an inner type of [`Owned`] to manage the DMA mapping lifecycle.
+///
+/// # Invariants
+///
+/// - `sgt` is a valid pointer to a `struct sg_table` for the entire lifetime of the
+/// [`DmaMappedSgt`].
+/// - `sgt` is always DMA mapped.
+struct DmaMappedSgt {
+ sgt: NonNull<bindings::sg_table>,
+ dev: ARef<Device>,
+ dir: dma::DataDirection,
+}
+
+// SAFETY: `DmaMappedSgt` can be sent to any task.
+unsafe impl Send for DmaMappedSgt {}
+
+// SAFETY: `DmaMappedSgt` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for DmaMappedSgt {}
+
+impl DmaMappedSgt {
+ /// # Safety
+ ///
+ /// - `sgt` must be a valid pointer to a `struct sg_table` for the entire lifetime of the
+ /// returned [`DmaMappedSgt`].
+ /// - The caller must guarantee that `sgt` remains DMA mapped for the entire lifetime of
+ /// [`DmaMappedSgt`].
+ unsafe fn new(
+ sgt: NonNull<bindings::sg_table>,
+ dev: &Device<Bound>,
+ dir: dma::DataDirection,
+ ) -> Result<Self> {
+ // SAFETY:
+ // - `dev.as_raw()` is a valid pointer to a `struct device`, which is guaranteed to be
+ // bound to a driver for the duration of this call.
+ // - `sgt` is a valid pointer to a `struct sg_table`.
+ error::to_result(unsafe {
+ bindings::dma_map_sgtable(dev.as_raw(), sgt.as_ptr(), dir.into(), 0)
+ })?;
+
+ // INVARIANT: By the safety requirements of this function it is guaranteed that `sgt` is
+ // valid for the entire lifetime of this object instance.
+ Ok(Self {
+ sgt,
+ dev: dev.into(),
+ dir,
+ })
+ }
+}
+
+impl Drop for DmaMappedSgt {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // - `self.dev.as_raw()` is a pointer to a valid `struct device`.
+ // - `self.dev` is the same device the mapping has been created for in `Self::new()`.
+ // - `self.sgt.as_ptr()` is a valid pointer to a `struct sg_table` by the type invariants
+ // of `Self`.
+ // - `self.dir` is the same `dma::DataDirection` the mapping has been created with in
+ // `Self::new()`.
+ unsafe {
+ bindings::dma_unmap_sgtable(self.dev.as_raw(), self.sgt.as_ptr(), self.dir.into(), 0)
+ };
+ }
+}
+
+/// A transparent wrapper around a `struct sg_table`.
+///
+/// While we could also create the `struct sg_table` in the constructor of [`Owned`], we can't tear
+/// down the `struct sg_table` in [`Owned::drop`]; the drop order in [`Owned`] matters.
+#[repr(transparent)]
+struct RawSGTable(Opaque<bindings::sg_table>);
+
+// SAFETY: `RawSGTable` can be sent to any task.
+unsafe impl Send for RawSGTable {}
+
+// SAFETY: `RawSGTable` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for RawSGTable {}
+
+impl RawSGTable {
+ /// # Safety
+ ///
+ /// - `pages` must be a slice of valid `struct page *`.
+ /// - The pages pointed to by `pages` must remain valid for the entire lifetime of the returned
+ /// [`RawSGTable`].
+ unsafe fn new(
+ pages: &mut [*mut bindings::page],
+ size: usize,
+ max_segment: u32,
+ flags: alloc::Flags,
+ ) -> Result<Self> {
+ // `sg_alloc_table_from_pages_segment()` expects at least one page, otherwise it
+ // produces a NULL pointer dereference.
+ if pages.is_empty() {
+ return Err(EINVAL);
+ }
+
+ let sgt = Opaque::zeroed();
+ // SAFETY:
+ // - `sgt.get()` is a valid pointer to uninitialized memory.
+ // - As by the check above, `pages` is not empty.
+ error::to_result(unsafe {
+ bindings::sg_alloc_table_from_pages_segment(
+ sgt.get(),
+ pages.as_mut_ptr(),
+ pages.len().try_into()?,
+ 0,
+ size,
+ max_segment,
+ flags.as_raw(),
+ )
+ })?;
+
+ Ok(Self(sgt))
+ }
+
+ #[inline]
+ fn as_raw(&self) -> *mut bindings::sg_table {
+ self.0.get()
+ }
+}
+
+impl Drop for RawSGTable {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: `sgt` is a valid and initialized `struct sg_table`.
+ unsafe { bindings::sg_free_table(self.0.get()) };
+ }
+}
+
+/// The [`Owned`] generic type of an [`SGTable`].
+///
+/// An [`SGTable<Owned>`] signifies that the [`SGTable`] owns all associated resources:
+///
+/// - The backing memory pages.
+/// - The `struct sg_table` allocation (`sgt`).
+/// - The DMA mapping, managed through a [`Devres`]-managed `DmaMappedSgt`.
+///
+/// Users interact with this type through the [`SGTable`] handle and do not need to manage
+/// [`Owned`] directly.
+#[pin_data]
+pub struct Owned<P> {
+ // Note: The drop order is relevant; we first have to unmap the `struct sg_table`, then free the
+ // `struct sg_table` and finally free the backing pages.
+ #[pin]
+ dma: Devres<DmaMappedSgt>,
+ sgt: RawSGTable,
+ _pages: P,
+}
+
+// SAFETY: `Owned` can be sent to any task if `P` can be sent to any task.
+unsafe impl<P: Send> Send for Owned<P> {}
+
+// SAFETY: `Owned` has no interior mutability and can be accessed concurrently if `P` can be
+// accessed concurrently.
+unsafe impl<P: Sync> Sync for Owned<P> {}
+
+impl<P> Owned<P>
+where
+ for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
+{
+ fn new(
+ dev: &Device<Bound>,
+ mut pages: P,
+ dir: dma::DataDirection,
+ flags: alloc::Flags,
+ ) -> Result<impl PinInit<Self, Error> + '_> {
+ let page_iter = pages.page_iter();
+ let size = page_iter.size();
+
+ let mut page_vec: KVec<*mut bindings::page> =
+ KVec::with_capacity(page_iter.page_count(), flags)?;
+
+ for page in page_iter {
+ page_vec.push(page.as_ptr(), flags)?;
+ }
+
+ // `dma_max_mapping_size` returns `size_t`, but `sg_alloc_table_from_pages_segment()` takes
+ // an `unsigned int`.
+ //
+ // SAFETY: `dev.as_raw()` is a valid pointer to a `struct device`.
+ let max_segment = match unsafe { bindings::dma_max_mapping_size(dev.as_raw()) } {
+ 0 => u32::MAX,
+ max_segment => u32::try_from(max_segment).unwrap_or(u32::MAX),
+ };
+
+ Ok(try_pin_init!(&this in Self {
+ // SAFETY:
+ // - `page_vec` is a `KVec` of valid `struct page *` obtained from `pages`.
+ // - The pages contained in `pages` remain valid for the entire lifetime of the
+ // `RawSGTable`.
+ sgt: unsafe { RawSGTable::new(&mut page_vec, size, max_segment, flags) }?,
+ dma <- {
+ // SAFETY: `this` is a valid pointer to uninitialized memory.
+ let sgt = unsafe { &raw mut (*this.as_ptr()).sgt }.cast();
+
+ // SAFETY: `sgt` is guaranteed to be non-null.
+ let sgt = unsafe { NonNull::new_unchecked(sgt) };
+
+ // SAFETY:
+ // - It is guaranteed that the object returned by `DmaMappedSgt::new` won't out-live
+ // `sgt`.
+ // - `sgt` is never DMA unmapped manually.
+ Devres::new(dev, unsafe { DmaMappedSgt::new(sgt, dev, dir) })
+ },
+ _pages: pages,
+ }))
+ }
+}
+
+impl<P> SGTable<Owned<P>>
+where
+ for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
+{
+ /// Allocates a new scatter-gather table from the given pages and maps it for DMA.
+ ///
+ /// This constructor creates a new [`SGTable<Owned>`] that takes ownership of `P`.
+ /// It allocates a `struct sg_table`, populates it with entries corresponding to the physical
+ /// pages of `P`, and maps the table for DMA with the specified [`Device`] and
+ /// [`dma::DataDirection`].
+ ///
+ /// The DMA mapping is managed through [`Devres`], ensuring that the DMA mapping is unmapped
+ /// once the associated [`Device`] is unbound, or when the [`SGTable<Owned>`] is dropped.
+ ///
+ /// # Parameters
+ ///
+ /// * `dev`: The [`Device`] that will be performing the DMA.
+ /// * `pages`: The entity providing the backing pages. It must implement [`page::AsPageIter`].
+ /// The ownership of this entity is moved into the new [`SGTable<Owned>`].
+ /// * `dir`: The [`dma::DataDirection`] of the DMA transfer.
+ /// * `flags`: Allocation flags for internal allocations (e.g., [`GFP_KERNEL`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::{
+ /// device::{Bound, Device},
+ /// dma, page,
+ /// prelude::*,
+ /// scatterlist::{SGTable, Owned},
+ /// };
+ ///
+ /// fn test(dev: &Device<Bound>) -> Result {
+ /// let size = 4 * page::PAGE_SIZE;
+ /// let pages = VVec::<u8>::with_capacity(size, GFP_KERNEL)?;
+ ///
+ /// let sgt = KBox::pin_init(SGTable::new(
+ /// dev,
+ /// pages,
+ /// dma::DataDirection::ToDevice,
+ /// GFP_KERNEL,
+ /// ), GFP_KERNEL)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn new(
+ dev: &Device<Bound>,
+ pages: P,
+ dir: dma::DataDirection,
+ flags: alloc::Flags,
+ ) -> impl PinInit<Self, Error> + '_ {
+ try_pin_init!(Self {
+ inner <- Owned::new(dev, pages, dir, flags)?
+ })
+ }
+}
+
+impl<P> Deref for SGTable<Owned<P>> {
+ type Target = SGTable;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ // SAFETY:
+ // - `self.inner.sgt.as_raw()` is a valid pointer to a `struct sg_table` for the entire
+ // lifetime of `self`.
+ // - The backing `struct sg_table` is not modified for the entire lifetime of `self`.
+ unsafe { SGTable::from_raw(self.inner.sgt.as_raw()) }
+ }
+}
+
+mod private {
+ pub trait Sealed {}
+
+ impl Sealed for super::Borrowed {}
+ impl<P> Sealed for super::Owned<P> {}
+}
+
+/// An [`Iterator`] over the DMA mapped [`SGEntry`] items of an [`SGTable`].
+///
+/// Note that the existence of an [`SGTableIter`] does not guarantee that the [`SGEntry`] items
+/// actually remain DMA mapped; they may be unmapped once the device is unbound.
+pub struct SGTableIter<'a> {
+ pos: Option<&'a SGEntry>,
+ /// The number of DMA mapped entries remaining in the iteration.
+ nents: c_uint,
+}
+
+impl<'a> Iterator for SGTableIter<'a> {
+ type Item = &'a SGEntry;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let entry = self.pos?;
+ self.nents = self.nents.saturating_sub(1);
+
+ // SAFETY: `entry.as_raw()` is a valid pointer to a `struct scatterlist`.
+ let next = unsafe { bindings::sg_next(entry.as_raw()) };
+
+ self.pos = (!next.is_null() && self.nents > 0).then(|| {
+ // SAFETY: If `next` is not NULL, `sg_next()` guarantees to return a valid pointer to
+ // the next `struct scatterlist`.
+ unsafe { SGEntry::from_raw(next) }
+ });
+
+ Some(entry)
+ }
+}
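A consumer-side sketch: both owned and borrowed tables deref to [`SGTable`], so walking the DMA mapped segments is the same in either case; the device-programming step is left as a comment:

```
use kernel::scatterlist::SGTable;

// Sketch: iterate the DMA mapped segments of any `SGTable`.
fn program_segments(sgt: &SGTable) {
    for entry in sgt.iter() {
        let addr = entry.dma_address();
        let len = entry.dma_len();
        // Hand (addr, len) to the device here.
        let _ = (addr, len);
    }
}
```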
diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs
index 1c7d43771a37..cfc37d81adf2 100644
--- a/rust/kernel/transmute.rs
+++ b/rust/kernel/transmute.rs
@@ -2,6 +2,8 @@
//! Traits for transmuting types.
+use core::mem::size_of;
+
/// Types for which any bit pattern is valid.
///
/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
@@ -9,10 +11,93 @@
///
/// It's okay for the type to have padding, as initializing those bytes has no effect.
///
+/// # Examples
+///
+/// ```
+/// use kernel::transmute::FromBytes;
+///
+/// # fn test() -> Option<()> {
+/// let raw = [1, 2, 3, 4];
+///
+/// let result = u32::from_bytes(&raw)?;
+///
+/// #[cfg(target_endian = "little")]
+/// assert_eq!(*result, 0x4030201);
+///
+/// #[cfg(target_endian = "big")]
+/// assert_eq!(*result, 0x1020304);
+///
+/// # Some(()) }
+/// # test().ok_or(EINVAL)?;
+/// # Ok::<(), Error>(())
+/// ```
+///
/// # Safety
///
/// All bit-patterns must be valid for this type. This type must not have interior mutability.
-pub unsafe trait FromBytes {}
+pub unsafe trait FromBytes {
+ /// Converts a slice of bytes to a reference to `Self`.
+ ///
+ /// Succeeds if `bytes` is properly aligned for `Self` and its length equals the size of
+ /// `Self`.
+ ///
+ /// Otherwise, returns [`None`].
+ fn from_bytes(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: Sized,
+ {
+ let slice_ptr = bytes.as_ptr().cast::<Self>();
+ let size = size_of::<Self>();
+
+ #[allow(clippy::incompatible_msrv)]
+ if bytes.len() == size && slice_ptr.is_aligned() {
+ // SAFETY: Size and alignment were just checked.
+ unsafe { Some(&*slice_ptr) }
+ } else {
+ None
+ }
+ }
+
+ /// Converts a mutable slice of bytes to a reference to `Self`.
+ ///
+ /// Succeeds if `bytes` is properly aligned for `Self` and its length equals the size of
+ /// `Self`.
+ ///
+ /// Otherwise, returns [`None`].
+ fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self>
+ where
+ Self: AsBytes + Sized,
+ {
+ let slice_ptr = bytes.as_mut_ptr().cast::<Self>();
+ let size = size_of::<Self>();
+
+ #[allow(clippy::incompatible_msrv)]
+ if bytes.len() == size && slice_ptr.is_aligned() {
+ // SAFETY: Size and alignment were just checked.
+ unsafe { Some(&mut *slice_ptr) }
+ } else {
+ None
+ }
+ }
+
+ /// Creates an owned instance of `Self` by copying `bytes`.
+ ///
+ /// Unlike [`FromBytes::from_bytes`], which requires aligned input, this method can be used on
+ /// non-aligned data at the cost of a copy.
+ fn from_bytes_copy(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ if bytes.len() == size_of::<Self>() {
+ // SAFETY: we just verified that `bytes` has the same size as `Self`, and per the
+ // invariants of `FromBytes`, any byte sequence of the correct length is a valid value
+ // for `Self`.
+ Some(unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast::<Self>()) })
+ } else {
+ None
+ }
+ }
+}
macro_rules! impl_frombytes {
($($({$($generics:tt)*})? $t:ty, )*) => {
@@ -47,7 +132,32 @@ impl_frombytes! {
///
/// Values of this type may not contain any uninitialized bytes. This type must not have interior
/// mutability.
-pub unsafe trait AsBytes {}
+pub unsafe trait AsBytes {
+ /// Returns `self` as a slice of bytes.
+ fn as_bytes(&self) -> &[u8] {
+ // CAST: `Self` implements `AsBytes` thus all bytes of `self` are initialized.
+ let data = core::ptr::from_ref(self).cast::<u8>();
+ let len = core::mem::size_of_val(self);
+
+ // SAFETY: `data` is non-null and valid for reads of `len * size_of::<u8>()` bytes.
+ unsafe { core::slice::from_raw_parts(data, len) }
+ }
+
+ /// Returns `self` as a mutable slice of bytes.
+ fn as_bytes_mut(&mut self) -> &mut [u8]
+ where
+ Self: FromBytes,
+ {
+ // CAST: `Self` implements both `AsBytes` and `FromBytes` thus making `Self`
+ // bi-directionally transmutable to `[u8; size_of_val(self)]`.
+ let data = core::ptr::from_mut(self).cast::<u8>();
+ let len = core::mem::size_of_val(self);
+
+ // SAFETY: `data` is non-null and valid for reads and writes of `len * size_of::<u8>()`
+ // bytes.
+ unsafe { core::slice::from_raw_parts_mut(data, len) }
+ }
+}
macro_rules! impl_asbytes {
($($({$($generics:tt)*})? $t:ty, )*) => {
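A short round-trip sketch of the new `AsBytes` methods paired with `FromBytes::from_bytes_copy`; `u32` receives both impls from the `impl_frombytes!`/`impl_asbytes!` invocations in this file:

```
use kernel::prelude::*;
use kernel::transmute::{AsBytes, FromBytes};

fn round_trip() -> Result {
    let value: u32 = 0xdead_beef;
    // View the integer's storage as a byte slice.
    let bytes = value.as_bytes();
    // Copying tolerates unaligned input, unlike `from_bytes`.
    let copy = u32::from_bytes_copy(bytes).ok_or(EINVAL)?;
    assert_eq!(copy, value);
    Ok(())
}
```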
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index b9343d5bc00f..706e833e9702 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -356,18 +356,11 @@ struct ClosureWork<T> {
func: Option<T>,
}
-impl<T> ClosureWork<T> {
- fn project(self: Pin<&mut Self>) -> &mut Option<T> {
- // SAFETY: The `func` field is not structurally pinned.
- unsafe { &mut self.get_unchecked_mut().func }
- }
-}
-
impl<T: FnOnce()> WorkItem for ClosureWork<T> {
type Pointer = Pin<KBox<Self>>;
fn run(mut this: Pin<KBox<Self>>) {
- if let Some(func) = this.as_mut().project().take() {
+ if let Some(func) = this.as_mut().project().func.take() {
(func)()
}
}
diff --git a/rust/pin-init/README.md b/rust/pin-init/README.md
index a4c01a8d78b2..723e275445d4 100644
--- a/rust/pin-init/README.md
+++ b/rust/pin-init/README.md
@@ -6,6 +6,18 @@
![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/Rust-for-Linux/pin-init/test.yml)
# `pin-init`
+> [!NOTE]
+>
+> This crate was originally named [`pinned-init`], but the migration to
> `pin-init` is not yet complete. The `legacy` branch contains the current
> version of the `pinned-init` crate, and the `main` branch already incorporates
+> the rename to `pin-init`.
+>
+> There are still some changes needed on the kernel side before the migration
+> can be completed.
+
+[`pinned-init`]: https://crates.io/crates/pinned-init
+
<!-- cargo-rdme start -->
Library to safely and fallibly initialize pinned `struct`s using in-place constructors.
diff --git a/rust/pin-init/examples/error.rs b/rust/pin-init/examples/error.rs
index e0cc258746ce..8f4e135eb8ba 100644
--- a/rust/pin-init/examples/error.rs
+++ b/rust/pin-init/examples/error.rs
@@ -24,4 +24,6 @@ impl From<AllocError> for Error {
}
#[allow(dead_code)]
-fn main() {}
+fn main() {
+ let _ = Error;
+}
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
index 62e013a5cc20..dd553212836e 100644
--- a/rust/pin-init/src/lib.rs
+++ b/rust/pin-init/src/lib.rs
@@ -740,6 +740,8 @@ macro_rules! stack_try_pin_init {
/// As already mentioned in the examples above, inside of `pin_init!` a `struct` initializer with
/// the following modifications is expected:
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
+/// - You can use `_: { /* run any user-code here */ },` anywhere you can place a field in order
+///   to run arbitrary code (see the sketch after this hunk).
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
/// pointer named `this` inside of the initializer.
/// - Using struct update syntax one can place `..Zeroable::init_zeroed()` at the very end of the
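A hedged sketch of the new `_: { ... }` escape hatch in `pin_init!`; the `Counter` type is illustrative:

```
use pin_init::{pin_data, pin_init, PinInit};

#[pin_data]
struct Counter {
    count: usize,
}

// Sketch: `_: { ... }` entries run the contained code at that point of the
// initializer, interleaved with field initialization.
fn make_counter() -> impl PinInit<Counter> {
    pin_init!(Counter {
        count: 0,
        _: { /* arbitrary user code runs here */ },
    })
}
```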
@@ -994,7 +996,7 @@ macro_rules! try_init {
/// }
///
/// impl<T> Foo<T> {
-/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> {
+/// fn project_this(self: Pin<&mut Self>) -> Pin<&mut T> {
/// assert_pinned!(Foo<T>, elem, T, inline);
///
/// // SAFETY: The field is structurally pinned.
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..d6acf2cd291e 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -831,6 +831,17 @@ macro_rules! __pin_data {
$($fields)*
}
+ $crate::__pin_data!(make_pin_projections:
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @decl_generics($($decl_generics)*),
+ @where($($whr)*),
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)*),
+ );
+
// We put the rest into this const item, because it then will not be accessible to anything
// outside.
const _: () = {
@@ -980,6 +991,56 @@ macro_rules! __pin_data {
stringify!($($rest)*),
);
};
+ (make_pin_projections:
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @decl_generics($($decl_generics:tt)*),
+ @where($($whr:tt)*),
+ @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
+ @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
+ ) => {
+ $crate::macros::paste! {
+ #[doc(hidden)]
+ $vis struct [< $name Projection >] <'__pin, $($decl_generics)*> {
+ $($(#[$($p_attr)*])* $pvis $p_field : ::core::pin::Pin<&'__pin mut $p_type>,)*
+ $($(#[$($attr)*])* $fvis $field : &'__pin mut $type,)*
+ ___pin_phantom_data: ::core::marker::PhantomData<&'__pin mut ()>,
+ }
+
+ impl<$($impl_generics)*> $name<$($ty_generics)*>
+ where $($whr)*
+ {
+ /// Pin-projects all fields of `Self`.
+ ///
+ /// These fields are structurally pinned:
+ $(#[doc = ::core::concat!(" - `", ::core::stringify!($p_field), "`")])*
+ ///
+ /// These fields are **not** structurally pinned:
+ $(#[doc = ::core::concat!(" - `", ::core::stringify!($field), "`")])*
+ #[inline]
+ $vis fn project<'__pin>(
+ self: ::core::pin::Pin<&'__pin mut Self>,
+ ) -> [< $name Projection >] <'__pin, $($ty_generics)*> {
+ // SAFETY: we only give access to `&mut` for fields not structurally pinned.
+ let this = unsafe { ::core::pin::Pin::get_unchecked_mut(self) };
+ [< $name Projection >] {
+ $(
+ // SAFETY: `$p_field` is structurally pinned.
+ $(#[$($p_attr)*])*
+ $p_field : unsafe { ::core::pin::Pin::new_unchecked(&mut this.$p_field) },
+ )*
+ $(
+ $(#[$($attr)*])*
+ $field : &mut this.$field,
+ )*
+ ___pin_phantom_data: ::core::marker::PhantomData,
+ }
+ }
+ }
+ }
+ };
(make_pin_data:
@pin_data($pin_data:ident),
@impl_generics($($impl_generics:tt)*),
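Roughly, and eliding attributes and hygiene details, the `make_pin_projections` arm above expands for a `struct Foo<A, B> { #[pin] a: A, b: B }` into something like the following hand-simplified sketch (not real macro output):

```rust
use core::marker::PhantomData;
use core::pin::Pin;

struct Foo<A, B> {
    a: A, // structurally pinned
    b: B,
}

struct FooProjection<'__pin, A, B> {
    a: Pin<&'__pin mut A>,
    b: &'__pin mut B,
    ___pin_phantom_data: PhantomData<&'__pin mut ()>,
}

impl<A, B> Foo<A, B> {
    fn project<'__pin>(self: Pin<&'__pin mut Self>) -> FooProjection<'__pin, A, B> {
        // SAFETY: We never move out of `this`; `a` is re-pinned immediately and
        // only the non-pinned `b` is handed out as a bare `&mut`.
        let this = unsafe { Pin::get_unchecked_mut(self) };
        FooProjection {
            // SAFETY: `a` is structurally pinned in `Foo`.
            a: unsafe { Pin::new_unchecked(&mut this.a) },
            b: &mut this.b,
            ___pin_phantom_data: PhantomData,
        }
    }
}
```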
@@ -988,38 +1049,56 @@ macro_rules! __pin_data {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+        // field is structurally pinned, then it must be initialized via `PinInit`; if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+                    // SAFETY: The caller upholds the requirements of `__pinned_init` for `slot`.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+                        // SAFETY: The caller guarantees that `slot` points to a structurally
+                        // pinned field, so it is sound to pin the reference.
+                        unsafe { ::core::pin::Pin::new_unchecked(slot) }
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+                    // SAFETY: The caller upholds the requirements of `__init` for `slot`.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
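The per-field `__project_*` helpers generated above are what the `init_slot` arms further down call right after initializing each field. A rough, hand-written sketch of their two shapes, for a hypothetical pin-data type `ThePinData` with a pinned field `a: A` and a non-pinned field `b: B` (all names invented):

```rust
use core::pin::Pin;

struct A;
struct B;
#[derive(Clone, Copy)]
struct ThePinData;

impl ThePinData {
    unsafe fn __project_a<'s>(self, slot: &'s mut A) -> Pin<&'s mut A> {
        // SAFETY: The caller only passes a reference to the structurally pinned
        // `a` field of a value that is itself pinned.
        unsafe { Pin::new_unchecked(slot) }
    }

    unsafe fn __project_b<'s>(self, slot: &'s mut B) -> &'s mut B {
        // Not structurally pinned: the identity function suffices.
        slot
    }
}
```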
@@ -1202,6 +1281,21 @@ macro_rules! __init_internal {
// have been initialized. Therefore we can now dismiss the guards by forgetting them.
$(::core::mem::forget($guards);)*
};
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+    // Arbitrary code block.
+ @munch_fields(_: { $($code:tt)* }, $($rest:tt)*),
+ ) => {
+ { $($code)* }
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data($data),
+ @slot($slot),
+ @guards($($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ };
(init_slot($use_data:ident): // `use_data` is present, so we use the `data` to init fields.
@data($data:ident),
@slot($slot:ident),
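Because the code block is pasted verbatim into the initializer closure, user code in it can plausibly early-return an error, in which case the drop guards accumulated so far drop the already-initialized fields. A sketch under that reading (`Pair` and `Error` are invented):

```rust
use pin_init::{pin_data, try_pin_init, PinInit};

struct Error;

#[pin_data]
struct Pair {
    #[pin]
    first: u64,
    second: u64,
}

fn checked(v: u64) -> impl PinInit<Pair, Error> {
    try_pin_init!(Pair {
        first: v,
        _: {
            // `first` is already initialized and in scope as `Pin<&mut u64>`;
            // bail out before `second` is touched.
            if *first == 0 {
                return Err(Error);
            }
        },
        second: 0,
    }? Error)
}
```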
@@ -1216,6 +1310,13 @@ macro_rules! __init_internal {
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
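With this binding in place, later initializers can refer to fields that were initialized in-place earlier; structurally pinned fields appear as `Pin<&mut T>`. A minimal sketch with invented types:

```rust
use pin_init::{pin_data, pin_init, PinInit};

#[pin_data]
struct Inner {
    #[pin]
    buf: [u8; 16],
}

impl Inner {
    fn new() -> impl PinInit<Self> {
        pin_init!(Inner { buf: [0; 16] })
    }

    fn len(&self) -> usize {
        self.buf.len()
    }
}

#[pin_data]
struct Wrapper {
    #[pin]
    inner: Inner,
    summary: usize,
}

fn wrapper() -> impl PinInit<Wrapper> {
    pin_init!(Wrapper {
        inner <- Inner::new(),
        // `inner: Pin<&mut Inner>` was bound by the projection above.
        summary: inner.len(),
    })
}
```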
@@ -1247,6 +1348,14 @@ macro_rules! __init_internal {
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
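The `Init` path gets the analogous binding as a plain `&mut`, since a field initialized through `Init` is by definition not structurally pinned. A sketch using `init!` (the `Buf` type is invented):

```rust
use pin_init::{init, zeroed, Init};

struct Buf {
    data: [u8; 8],
    len: usize,
}

fn buf() -> impl Init<Buf> {
    init!(Buf {
        data <- zeroed(),
        // `data: &mut [u8; 8]` is in scope thanks to the binding above.
        len: data.len(),
    })
}
```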
@@ -1265,7 +1374,48 @@ macro_rules! __init_internal {
);
}
};
- (init_slot($($use_data:ident)?):
+    (init_slot(): // No `use_data`, so no field is structurally pinned.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+    // SAFETY:
+    // - the field is not structurally pinned, since no `use_data` was required to create this
+    //   initializer,
+    // - the field has been initialized,
+    // - the reference is only valid until the end of the initializer.
+    #[allow(unused_variables)]
+    let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
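In the new no-`use_data` arm above, the binding is always a bare `&mut`, because such initializers cannot have structurally pinned fields. An illustrative sketch with an invented `Point`:

```rust
use pin_init::{init, Init};

struct Point {
    x: i32,
    y: i32,
}

fn point() -> impl Init<Point> {
    init!(Point {
        x: 3,
        // `x: &mut i32` here, per the new arm.
        y: *x * 2,
    })
}
```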
@@ -1279,6 +1429,13 @@ macro_rules! __init_internal {
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1446,7 @@ macro_rules! __init_internal {
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
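Together with the arms above, by-value fields also become visible to later initializers, projected according to their pinnedness. A sketch with an invented `Counter`:

```rust
use pin_init::{pin_data, pin_init, PinInit};

#[pin_data]
struct Counter {
    #[pin]
    base: u64,
    next: u64,
}

fn counter() -> impl PinInit<Counter> {
    pin_init!(Counter {
        base: 10,
        // `base` is rebound as `Pin<&mut u64>` by the hunk above, so the
        // next initializer can read it.
        next: *base + 1,
    })
}
```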
@@ -1300,6 +1457,20 @@ macro_rules! __init_internal {
(make_initializer:
@slot($slot:ident),
@type_name($t:path),
+ @munch_fields(_: { $($code:tt)* }, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+        // Code blocks are ignored for the initializer check.
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)*),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
@munch_fields(..Zeroable::init_zeroed() $(,)?),
@acc($($acc:tt)*),
) => {
diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h
index 1409441359f5..d4a239cf2a64 100644
--- a/rust/uapi/uapi_helper.h
+++ b/rust/uapi/uapi_helper.h
@@ -9,6 +9,7 @@
#include <uapi/asm-generic/ioctl.h>
#include <uapi/drm/drm.h>
#include <uapi/drm/nova_drm.h>
+#include <uapi/drm/panthor_drm.h>
#include <uapi/linux/mdio.h>
#include <uapi/linux/mii.h>
#include <uapi/linux/ethtool.h>